/*
 * SuperH clock framework
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	clk->nr_freqs = nr_freqs;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}
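
/*
 * Illustrative sketch only (not part of this file): a divisor-based clock
 * provider could fill in a per-clock frequency table along these lines.
 * "my_div_table", "my_freqs", "MY_NR_DIVS" and my_current_divisor() are
 * hypothetical names; note the extra slot reserved for the terminator and
 * that a NULL bitmap is accepted when no entries need to be masked out:
 *
 *	static struct cpufreq_frequency_table my_freqs[MY_NR_DIVS + 1];
 *
 *	static unsigned long my_div_recalc(struct clk *clk)
 *	{
 *		clk_rate_table_build(clk, my_freqs, MY_NR_DIVS,
 *				     &my_div_table, NULL);
 *		return clk->parent->rate / my_current_divisor(clk);
 *	}
 */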

struct clk_rate_round_data;

struct clk_rate_round_data {
	unsigned long rate;
	unsigned int min, max;
	long (*func)(unsigned int, struct clk_rate_round_data *);
	void *arg;
};

/*
 * Walk each candidate position handed back by the rounder's ->func()
 * callback, skipping candidates that report a frequency of 0 (invalid).
 */
#define for_each_frequency(pos, r, freq)			\
	for (pos = r->min, freq = r->func(pos, r);		\
	     pos <= r->max; pos++, freq = r->func(pos, r))	\
		if (unlikely(freq == 0))			\
			;					\
		else

/*
 * Find the candidate frequency closest to the requested rate, clamping to
 * the lowest/highest candidates when the request falls outside the range.
 */
static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long highest, lowest, freq;
	long rate_best_fit = -ENOENT;
	int i;

	highest = 0;
	lowest = ~0UL;

	for_each_frequency(i, rounder, freq) {
		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rounder->rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rounder->rate >= highest)
		rate_best_fit = highest;
	if (rounder->rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}

static long clk_rate_table_iter(unsigned int pos,
				struct clk_rate_round_data *rounder)
{
	struct cpufreq_frequency_table *freq_table = rounder->arg;
	unsigned long freq = freq_table[pos].frequency;

	if (freq == CPUFREQ_ENTRY_INVALID)
		freq = 0;

	return freq;
}

long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	struct clk_rate_round_data table_round = {
		.min	= 0,
		.max	= clk->nr_freqs - 1,
		.func	= clk_rate_table_iter,
		.arg	= freq_table,
		.rate	= rate,
	};

	if (clk->nr_freqs < 1)
		return -ENOSYS;

	return clk_rate_round_helper(&table_round);
}

static long clk_rate_div_range_iter(unsigned int pos,
				    struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate)
{
	struct clk_rate_round_data div_range_round = {
		.min	= div_min,
		.max	= div_max,
		.func	= clk_rate_div_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&div_range_round);
}

int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * Now do the debugfs renaming to reattach the child
	 * to the proper parent.
	 */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}

static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
		 clk))
		return;

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
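
/*
 * Illustrative sketch only (not part of this file): from a driver's point
 * of view this framework is consumed through the generic clk API (clk_get()
 * and clk_put() come from clkdev, not from this file). The "my_clk" lookup
 * name is hypothetical and error handling is trimmed:
 *
 *	struct clk *clk = clk_get(dev, "my_clk");
 *
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		clk_set_rate(clk, clk_round_rate(clk, 48000000));
 *		...
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 */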

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
	while (clk->parent)
		clk = clk->parent;

	return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/*
	 * Propagate mappings.
	 */
	if (!mapping) {
		struct clk *clkp;

		/*
		 * Dummy mapping for root clocks with no specified ranges.
		 */
		if (!clk->parent) {
			clk->mapping = &dummy_mapping;
			return 0;
		}

		/*
		 * If we're on a child clock and it provides no mapping of its
		 * own, inherit the mapping from its root clock.
		 */
		clkp = lookup_root_clock(clk);
		mapping = clkp->mapping;
		BUG_ON(!mapping);
	}

	/*
	 * Establish initial mapping.
	 */
	if (!mapping->base && mapping->phys) {
		kref_init(&mapping->ref);

		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
		if (unlikely(!mapping->base))
			return -ENXIO;
	} else if (mapping->base) {
		/*
		 * Bump the refcount for an existing mapping.
		 */
		kref_get(&mapping->ref);
	}

	clk->mapping = mapping;
	return 0;
}

static void clk_destroy_mapping(struct kref *kref)
{
	struct clk_mapping *mapping;

	mapping = container_of(kref, struct clk_mapping, ref);

	iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/* Nothing to do */
	if (mapping == &dummy_mapping)
		return;

	kref_put(&mapping->ref, clk_destroy_mapping);
	clk->mapping = NULL;
}

int clk_register(struct clk *clk)
{
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * Trap out already registered clocks.
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	ret = clk_establish_mapping(clk);
	if (unlikely(ret))
		goto out_unlock;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);

#ifdef CONFIG_SH_CLK_CPG_LEGACY
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
#endif

out_unlock:
	mutex_unlock(&clock_list_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_register);

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	clk_teardown_mapping(clk);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);
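
/*
 * Illustrative sketch only (not part of this file): a platform would
 * typically describe its clocks statically and hand them to clk_register()
 * during setup. The clock below is made up; the ops type name is assumed
 * to be the struct clk_ops that <linux/sh_clk.h> of this era provides:
 *
 *	static struct clk_ops my_root_clk_ops = {
 *		.recalc	= my_root_recalc,
 *	};
 *
 *	static struct clk my_root_clk = {
 *		.ops	= &my_root_clk_ops,
 *		.flags	= CLK_ENABLE_ON_INIT,
 *	};
 *
 *	...
 *	clk_register(&my_root_clk);
 */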

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			pr_debug("set parent of %p to %p (new rate %ld)\n",
				 clk, clk->parent, clk->rate);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
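
/*
 * Illustrative sketch only (not part of this file): a clock's ->round_rate
 * op is typically a thin wrapper around one of the rounding helpers above.
 * For a simple divider the divisor range below (1..16) is made up:
 *
 *	static long my_div_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_div_range_round(clk, 1, 16, rate);
 *	}
 *
 * Table-driven clocks would instead call clk_rate_table_round() with the
 * clock's frequency table.
 */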

long clk_round_parent(struct clk *clk, unsigned long target,
		      unsigned long *best_freq, unsigned long *parent_freq,
		      unsigned int div_min, unsigned int div_max)
{
	struct cpufreq_frequency_table *freq, *best = NULL;
	unsigned long error = ULONG_MAX, freq_high, freq_low, div;
	struct clk *parent = clk_get_parent(clk);

	if (!parent) {
		*parent_freq = 0;
		*best_freq = clk_round_rate(clk, target);
		return abs(target - *best_freq);
	}

	for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
	     freq++) {
		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		if (unlikely(freq->frequency / target <= div_min - 1)) {
			unsigned long freq_max;

			freq_max = (freq->frequency + div_min / 2) / div_min;
			if (error > target - freq_max) {
				error = target - freq_max;
				best = freq;
				if (best_freq)
					*best_freq = freq_max;
			}

			pr_debug("too low freq %u, error %lu\n", freq->frequency,
				 target - freq_max);

			if (!error)
				break;

			continue;
		}

		if (unlikely(freq->frequency / target >= div_max)) {
			unsigned long freq_min;

			freq_min = (freq->frequency + div_max / 2) / div_max;
			if (error > freq_min - target) {
				error = freq_min - target;
				best = freq;
				if (best_freq)
					*best_freq = freq_min;
			}

			pr_debug("too high freq %u, error %lu\n", freq->frequency,
				 freq_min - target);

			if (!error)
				break;

			continue;
		}

		div = freq->frequency / target;
		freq_high = freq->frequency / div;
		freq_low = freq->frequency / (div + 1);

		if (freq_high - target < error) {
			error = freq_high - target;
			best = freq;
			if (best_freq)
				*best_freq = freq_high;
		}

		if (target - freq_low < error) {
			error = target - freq_low;
			best = freq;
			if (best_freq)
				*best_freq = freq_low;
		}

		pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
			 freq->frequency, div, freq_high, div + 1, freq_low,
			 *best_freq, best->frequency);

		if (!error)
			break;
	}

	if (parent_freq)
		*parent_freq = best->frequency;

	return error;
}
EXPORT_SYMBOL_GPL(clk_round_parent);

#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp, rate);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

/*
 * debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child, *child_tmp;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%p", c);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);