/*
 * Register map access API - debugfs
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"

struct regmap_debugfs_node {
	struct regmap *map;
	const char *name;
	struct list_head link;
};

static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);

/*
 * Calculate the width of a fixed-format register address: snprintf()
 * with a zero-sized buffer returns the number of characters that
 * would have been written.
 */
static size_t regmap_calc_reg_len(int max_val)
{
	return snprintf(NULL, 0, "%x", max_val);
}

static ssize_t regmap_name_read_file(struct file *file,
				     char __user *user_buf, size_t count,
				     loff_t *ppos)
{
	struct regmap *map = file->private_data;
	int ret;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
	if (ret < 0) {
		kfree(buf);
		return ret;
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};

static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
	struct regmap_debugfs_off_cache *c;

	while (!list_empty(&map->debugfs_off_cache)) {
		c = list_first_entry(&map->debugfs_off_cache,
				     struct regmap_debugfs_off_cache,
				     list);
		list_del(&c->list);
		kfree(c);
	}
}

static bool regmap_printable(struct regmap *map, unsigned int reg)
{
	if (regmap_precious(map, reg))
		return false;

	if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
		return false;

	return true;
}
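
/*
 * Illustrative note: each regmap_debugfs_off_cache entry describes one
 * contiguous block of printable registers and the span of file offsets
 * it occupies in the fixed-format dump.  For example, assuming
 * reg_stride == 1, a 9-byte line length and printable registers
 * 0x0-0x3 and 0x10-0x13, the cache built below would hold two entries:
 *
 *   { base_reg = 0x00, max_reg = 0x03, min =  0, max = 35 }
 *   { base_reg = 0x10, max_reg = 0x13, min = 36, max = 71 }
 */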

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry?  Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}

static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	/* Calculate the length of a fixed format */
	if (!map->debugfs_tot_len) {
		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
		map->debugfs_val_len = 2 * map->format.val_bytes;
		map->debugfs_tot_len = map->debugfs_reg_len +
			map->debugfs_val_len + 3;	/* : \n */
	}
}
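
/*
 * Worked example (illustrative): a map with max_register 0xff and
 * 16-bit values gives debugfs_reg_len = 2 and debugfs_val_len = 4, so
 * debugfs_tot_len = 2 + 4 + 3 = 9 bytes per line - two address
 * digits, ": ", four value digits and a trailing newline, e.g.
 * "3f: beef\n".
 */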

static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i <= to; i += map->reg_stride) {
		if (!regmap_readable(map, i) && !regmap_cached(map, i))
			continue;

		if (regmap_precious(map, i))
			continue;

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;

out:
	kfree(buf);
	return ret;
}

static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous, especially with clients such as PMICs, so no
 * real compile-time configuration option is provided for this feature;
 * anyone who wants to use it needs to modify the source code directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf) - 1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif

static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};
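
/*
 * Usage example (illustrative): if REGMAP_ALLOW_WRITE_DEBUGFS were
 * defined above, a register could be poked from userspace by writing
 * "<reg> <value>" in hex to the registers file, e.g. (assuming debugfs
 * is mounted in the usual place):
 *
 *   echo "1c 3f" > /sys/kernel/debug/regmap/<device>/registers
 *
 * which writes 0x3f to register 0x1c and taints the kernel.
 */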

static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};

static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned int entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/*
	 * While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks.
	 */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/*
	 * Reset file pointer as the fixed-format of the `registers'
	 * file is not compatible with the `range' file.
	 */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};
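
/*
 * Example output (illustrative): the range file lists each contiguous
 * block of printable registers as "<first>-<last>" in hex, one block
 * per line.  For the two cached blocks used in the example near the
 * top of this file that would be:
 *
 *   0-3
 *   10-13
 */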

static int regmap_access_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	int i, reg_len;

	reg_len = regmap_calc_reg_len(map->max_register);

	for (i = 0; i <= map->max_register; i += map->reg_stride) {
		/* Ignore registers which are neither readable nor writable */
		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
			continue;

		/* Format the register */
		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
			   regmap_readable(map, i) ? 'y' : 'n',
			   regmap_writeable(map, i) ? 'y' : 'n',
			   regmap_volatile(map, i) ? 'y' : 'n',
			   regmap_precious(map, i) ? 'y' : 'n');
	}

	return 0;
}

static int access_open(struct inode *inode, struct file *file)
{
	return single_open(file, regmap_access_show, inode->i_private);
}

static const struct file_operations regmap_access_fops = {
	.open = access_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
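
/*
 * Example output (illustrative): the access file shows one line per
 * register with readable/writeable/volatile/precious flags in that
 * order, e.g.
 *
 *   00: y y n n
 *   01: y n y n
 */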

static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	ssize_t result;
	bool was_enabled, require_sync = false;
	int err;

	map->lock(map->lock_arg);

	was_enabled = map->cache_only;

	result = debugfs_write_file_bool(file, user_buf, count, ppos);
	if (result < 0) {
		map->unlock(map->lock_arg);
		return result;
	}

	if (map->cache_only && !was_enabled) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!map->cache_only && was_enabled) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}

	map->unlock(map->lock_arg);

	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return result;
}

static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};

static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	ssize_t result;
	bool was_enabled;

	map->lock(map->lock_arg);

	was_enabled = map->cache_bypass;

	result = debugfs_write_file_bool(file, user_buf, count, ppos);
	if (result < 0)
		goto out;

	if (map->cache_bypass && !was_enabled) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!map->cache_bypass && was_enabled) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}

out:
	map->unlock(map->lock_arg);

	return result;
}

static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};

void regmap_debugfs_init(struct regmap *map, const char *name)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";

	/*
	 * Userspace can initiate reads from the hardware over debugfs.
	 * Normally internal regmap structures and buffers are protected with
	 * a mutex or a spinlock, but if the regmap owner decided to disable
	 * all locking mechanisms, this is no longer the case.  For safety:
	 * don't create the debugfs entries if locking is disabled.
	 */
	if (map->debugfs_disable) {
		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
		return;
	}

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		node->name = name;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	if (name) {
		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
					      devname, name);
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
	if (!map->debugfs) {
		dev_warn(map->dev, "Failed to create debugfs directory\n");
		return;
	}

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}

void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}

void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
	if (!regmap_debugfs_root) {
		pr_warn("regmap: Failed to create debugfs root\n");
		return;
	}

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map, node->name);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}
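
/*
 * Resulting layout (illustrative): assuming debugfs is mounted in the
 * usual place, each map gets a directory under /sys/kernel/debug/regmap/
 * named after its device, with an optional "-<name>" suffix, e.g.
 *
 *   /sys/kernel/debug/regmap/<device>/
 *       name  range  registers  access
 *       cache_only  cache_dirty  cache_bypass   (cached maps only)
 *       <range name> ...                        (one per named range)
 */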