// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - debugfs
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"

struct regmap_debugfs_node {
	struct regmap *map;
	const char *name;
	struct list_head link;
};

static unsigned int dummy_index;
static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);

/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val)
{
	return snprintf(NULL, 0, "%x", max_val);
}

static ssize_t regmap_name_read_file(struct file *file,
				     char __user *user_buf, size_t count,
				     loff_t *ppos)
{
	struct regmap *map = file->private_data;
	const char *name = "nodev";
	int ret;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (map->dev && map->dev->driver)
		name = map->dev->driver->name;

	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
	if (ret < 0) {
		kfree(buf);
		return ret;
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};

static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
	struct regmap_debugfs_off_cache *c;

	while (!list_empty(&map->debugfs_off_cache)) {
		c = list_first_entry(&map->debugfs_off_cache,
				     struct regmap_debugfs_off_cache,
				     list);
		list_del(&c->list);
		kfree(c);
	}
}

static bool regmap_printable(struct regmap *map, unsigned int reg)
{
	if (regmap_precious(map, reg))
		return false;

	if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
		return false;

	return true;
}

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry?  Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}
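/*
 * Worked example of the offset cache above, for a hypothetical map with
 * reg_stride = 1, 16-bit values, and registers 0x0-0x3 and 0x8-0xf
 * printable (everything else hidden).  With max_register = 0xf the
 * register field is one hex digit, so debugfs_tot_len = 1 + 4 + 3 = 8
 * bytes per dumped line and the scan produces two cache blocks:
 *
 *   { min = 0,  max = 31, base_reg = 0x0, max_reg = 0x3 }   (4 regs * 8)
 *   { min = 32, max = 95, base_reg = 0x8, max_reg = 0xf }   (8 regs * 8)
 *
 * A read starting at file offset 40 falls inside the second block:
 * fpos_offset = 40 - 32 = 8, reg_offset = 8 / 8 = 1, so the dump starts
 * at register 0x9 and *pos is rounded down to 40.
 */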
static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	/* Calculate the length of a fixed format */
	if (!map->debugfs_tot_len) {
		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
		map->debugfs_val_len = 2 * map->format.val_bytes;
		map->debugfs_tot_len = map->debugfs_reg_len +
			map->debugfs_val_len + 3;	/* : \n */
	}
}

static int regmap_next_readable_reg(struct regmap *map, int reg)
{
	struct regmap_debugfs_off_cache *c;
	int ret = -EINVAL;

	if (regmap_printable(map, reg + map->reg_stride)) {
		ret = reg + map->reg_stride;
	} else {
		mutex_lock(&map->cache_lock);
		list_for_each_entry(c, &map->debugfs_off_cache, list) {
			if (reg > c->max_reg)
				continue;
			if (reg < c->base_reg) {
				ret = c->base_reg;
				break;
			}
		}
		mutex_unlock(&map->cache_lock);
	}
	return ret;
}

static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i >= 0 && i <= to;
	     i = regmap_next_readable_reg(map, i)) {

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;

out:
	kfree(buf);
	return ret;
}
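/*
 * For reference, the fixed dump format produced above looks like this
 * for a hypothetical map with max_register = 0xff and 16-bit values
 * (two hex digits for the register, four for the value):
 *
 *   00: 1a2b
 *   01: XXXX	<- register exists but could not be read
 *   02: 0000
 *
 * Every line is exactly debugfs_tot_len bytes, which is what allows a
 * file offset to be mapped back to a register without re-reading.
 */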
static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous especially when we have clients such as
 * PMICs, therefore don't provide any real compile time configuration
 * option for this feature; people who want to use it will need to
 * modify the source code directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf) - 1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif

static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};
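/*
 * With REGMAP_ALLOW_WRITE_DEBUGFS defined above, a register can be
 * poked from userspace by writing a "<reg> <value>" pair in hex, e.g.
 * (illustrative path; the directory name depends on the device):
 *
 *   echo '0x10 0x3' > /sys/kernel/debug/regmap/spi0.0/registers
 *
 * Doing so taints the kernel with TAINT_USER.
 */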
static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};

static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned int entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/*
	 * While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks.
	 */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/*
	 * Reset the file pointer as the fixed format of the `registers'
	 * file is not compatible with the `range' file.
	 */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};
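/*
 * The `range' file produced above lists each contiguous block of
 * printable registers as "<first>-<last>" in hex, one block per line.
 * For the hypothetical layout used in the dump-cache example earlier
 * it would read:
 *
 *   0-3
 *   8-f
 */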
static int regmap_access_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	int i, reg_len;

	reg_len = regmap_calc_reg_len(map->max_register);

	for (i = 0; i <= map->max_register; i += map->reg_stride) {
		/* Ignore registers which are neither readable nor writable */
		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
			continue;

		/* Format the register */
		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
			   regmap_readable(map, i) ? 'y' : 'n',
			   regmap_writeable(map, i) ? 'y' : 'n',
			   regmap_volatile(map, i) ? 'y' : 'n',
			   regmap_precious(map, i) ? 'y' : 'n');
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(regmap_access);
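/*
 * Sample `access' output for a hypothetical map; the four columns are
 * readable, writable, volatile and precious, in that order.  Registers
 * that are neither readable nor writable are skipped entirely.
 *
 *   00: y y n n
 *   01: y n y n
 *   02: n y n n
 */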
static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	bool new_val, require_sync = false;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}
	map->cache_only = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return count;
}

static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};

static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	bool new_val;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}
	map->cache_bypass = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	return count;
}

static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};

void regmap_debugfs_init(struct regmap *map, const char *name)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";

	/*
	 * Userspace can initiate reads from the hardware over debugfs.
	 * Normally internal regmap structures and buffers are protected with
	 * a mutex or a spinlock, but if the regmap owner decided to disable
	 * all locking mechanisms, this is no longer the case. For safety:
	 * don't create the debugfs entries if locking is disabled.
	 */
	if (map->debugfs_disable) {
		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
		return;
	}

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		node->name = name;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	if (name) {
		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
					      devname, name);
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	if (!strcmp(name, "dummy")) {
		kfree(map->debugfs_name);

		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
					      dummy_index);
		name = map->debugfs_name;
		dummy_index++;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}

void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}

void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map, node->name);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}
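/*
 * Summary of the debugfs tree this file creates (directory names are
 * illustrative; they derive from dev_name() and the optional map name):
 *
 *   /sys/kernel/debug/regmap/<dev>[-<name>]/
 *       name          owning driver's name
 *       range         blocks of printable registers
 *       registers     register dump; writable only when
 *                     REGMAP_ALLOW_WRITE_DEBUGFS is defined
 *       access        per-register readable/writable/volatile/precious
 *       cache_only,
 *       cache_dirty,
 *       cache_bypass  cache controls, present only when a register
 *                     cache is in use
 */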