/*
 *  linux/include/linux/clk.h
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed.  Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE.  Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 */
#define PRE_RATE_CHANGE			BIT(0)
#define POST_RATE_CHANGE		BIT(1)
#define ABORT_RATE_CHANGE		BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: a srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk			*clk;
	struct srcu_notifier_head	notifier_head;
	struct list_head		node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future.  For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk		*clk;
	unsigned long		old_rate;
	unsigned long		new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_*() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char		*id;
	struct clk		*clk;
};

#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);

/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
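
/*
 * A minimal, illustrative sketch of a rate-change notifier (not part of
 * this API): foo_clk_rate_cb, foo_clk_nb and foo_register_rate_cb are
 * hypothetical names.  The callback receives a struct clk_notifier_data
 * and one of the PRE/POST/ABORT_RATE_CHANGE events defined above.
 *
 *	static int foo_clk_rate_cb(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("rate changing: %lu -> %lu Hz\n",
 *				 ndata->old_rate, ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_clk_nb = {
 *		.notifier_call = foo_clk_rate_cb,
 *	};
 *
 *	static int foo_register_rate_cb(struct clk *clk)
 *	{
 *		return clk_notifier_register(clk, &foo_clk_nb);
 *	}
 */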

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 *		      for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees.  Returns 0 on
 * success, otherwise a negative errno.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio.  Returns 0
 * on success, otherwise a negative errno.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns a negative errno.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
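
/*
 * Illustrative sketch (hypothetical names): reading the duty cycle as a
 * percentage by passing a scale of 100 to clk_get_scaled_duty_cycle().
 *
 *	static void foo_report_duty_cycle(struct clk *clk)
 *	{
 *		int pct = clk_get_scaled_duty_cycle(clk, 100);
 *
 *		if (pct < 0)
 *			pr_warn("could not read duty cycle: %d\n", pct);
 *		else
 *			pr_info("duty cycle: %d%%\n", pct);
 *	}
 */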

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node.  Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise.  Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

#else

static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_phase(struct clk *clk, int phase)
{
	return -ENOTSUPP;
}

static inline int clk_get_phase(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}

static inline int clk_get_scaled_duty_cycle(struct clk *clk,
					    unsigned int scale)
{
	return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;
}

#endif

/**
 * clk_prepare - prepare a clock source
 * @clk: clock source
 *
 * This prepares the clock source for use.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
				  const struct clk_bulk_data *clks);
#else
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}

static inline int __must_check clk_bulk_prepare(int num_clks,
						const struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock.  The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks,
				      const struct clk_bulk_data *clks)
{
	might_sleep();
}
#endif

#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);
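
/*
 * Illustrative sketch (hypothetical names): the usual consumer pattern of
 * looking up a clock, preparing and enabling it, and undoing each step on
 * failure.  "bus" is an assumed consumer ID used only for the example.
 *
 *	static int foo_start(struct device *dev)
 *	{
 *		struct clk *clk;
 *		int ret;
 *
 *		clk = clk_get(dev, "bus");
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *
 *		ret = clk_prepare(clk);
 *		if (ret) {
 *			clk_put(clk);
 *			return ret;
 *		}
 *
 *		ret = clk_enable(clk);
 *		if (ret) {
 *			clk_unprepare(clk);
 *			clk_put(clk);
 *		}
 *		return ret;
 *	}
 */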

/**
 * clk_bulk_get - lookup and obtain a number of references to clock producer.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation.  If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully, or a negative errno otherwise.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);
/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation.  If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks);
/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Return 0 on success, a negative errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management, the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
				   struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management, the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get_all(struct device *dev,
				       struct clk_bulk_data **clks);
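
/*
 * Illustrative sketch (hypothetical names and consumer IDs): acquiring
 * several clocks with one managed call and a static clk_bulk_data table.
 *
 *	static struct clk_bulk_data foo_clks[] = {
 *		{ .id = "bus" },
 *		{ .id = "core" },
 *	};
 *
 *	static int foo_get_clks(struct device *dev)
 *	{
 *		return devm_clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
 *	}
 */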

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_get should not be called from within interrupt context.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *			     clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id);
/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider.  It prevents any other consumer from executing, even indirectly,
 * an operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to release the exclusivity they previously
 * claimed with clk_rate_exclusive_get().
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);
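
/*
 * Illustrative sketch (hypothetical names): claiming exclusive rate control
 * around a rate change so other consumers cannot alter it in between.
 *
 *	static int foo_lock_rate(struct clk *clk, unsigned long rate)
 *	{
 *		int ret;
 *
 *		ret = clk_rate_exclusive_get(clk);
 *		if (ret)
 *			return ret;
 *
 *		ret = clk_set_rate(clk, rate);
 *		if (ret)
 *			clk_rate_exclusive_put(clk);
 *		return ret;
 *	}
 */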

/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock can not be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
				 const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 *		      longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 *		  This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put - "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put - "free" the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put_all should not be called from within interrupt context.
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put - "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * devm_clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */

/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way.  In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);
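
/*
 * Illustrative sketch (hypothetical names): probing the achievable rate
 * with clk_round_rate() before committing to it with clk_set_rate().
 *
 *	static int foo_set_closest_rate(struct clk *clk, unsigned long target)
 *	{
 *		long rounded = clk_round_rate(clk, target);
 *
 *		if (rounded < 0)
 *			return rounded;
 *
 *		pr_debug("requested %lu Hz, hardware can do %ld Hz\n",
 *			 target, rounded);
 *		return clk_set_rate(clk, rounded);
 *	}
 */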

/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 *			    clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get().  Caller must balance this call with a call to
 * clk_rate_exclusive_put().
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);

/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer.  In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);
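
/*
 * Illustrative sketch (hypothetical names, arbitrary example rates):
 * checking a candidate parent before reparenting, then constraining the
 * allowed rate range with clk_set_rate_range().
 *
 *	static int foo_reparent_and_limit(struct clk *clk, struct clk *parent)
 *	{
 *		int ret;
 *
 *		if (!clk_has_parent(clk, parent))
 *			return -EINVAL;
 *
 *		ret = clk_set_parent(clk, parent);
 *		if (ret)
 *			return ret;
 *
 *		return clk_set_rate_range(clk, 100000000, 200000000);
 *	}
 */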

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock registers for powerstates in which the
 * contents of the registers will be lost.  Occurs deep within the suspend
 * code so locking is not necessary.
 */
int clk_save_context(void);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * This occurs with all clocks enabled.  Occurs deep within the resume code
 * so locking is not necessary.
 */
void clk_restore_context(void);

#else /* !CONFIG_HAVE_CLK */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check devm_clk_bulk_get(struct device *dev,
						 int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
						     struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_get_clk_from_child(struct device *dev,
						  struct device_node *np,
						  const char *con_id)
{
	return NULL;
}

static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

static inline int clk_rate_exclusive_get(struct clk *clk)
{
	return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

static inline int clk_enable(struct clk *clk)
{
	return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks,
					       const struct clk_bulk_data *clks)
{
	return 0;
}

static inline void clk_disable(struct clk *clk) {}

static inline void clk_bulk_disable(int num_clks,
				    const struct clk_bulk_data *clks) {}

static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	return true;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}

static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}

static inline int clk_save_context(void)
{
	return 0;
}

static inline void clk_restore_context(void) {}

#endif

/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);

	return ret;
}
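
/*
 * Illustrative sketch (hypothetical names and consumer ID): pairing
 * clk_prepare_enable() in a probe path with clk_disable_unprepare()
 * (defined below) in the matching remove/error path.
 *
 *	static int foo_probe_clocks(struct device *dev, struct foo_priv *priv)
 *	{
 *		priv->clk = devm_clk_get(dev, "core");
 *		if (IS_ERR(priv->clk))
 *			return PTR_ERR(priv->clk);
 *
 *		return clk_prepare_enable(priv->clk);
 *	}
 *
 *	static void foo_remove_clocks(struct foo_priv *priv)
 *	{
 *		clk_disable_unprepare(priv->clk);
 *	}
 */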

/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}

static inline int __must_check clk_bulk_prepare_enable(int num_clks,
							struct clk_bulk_data *clks)
{
	int ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		return ret;
	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		clk_bulk_unprepare(num_clks, clks);

	return ret;
}

static inline void clk_bulk_disable_unprepare(int num_clks,
					      struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}

#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
#else
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
					     const char *name)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

#endif