/*
 * OMAP DPLL clock support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include "clock.h"

#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static const struct clk_ops dpll_m4xen_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.recalc_rate = &omap4_dpll_regm4xen_recalc,
	.round_rate = &omap4_dpll_regm4xen_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap4_dpll_regm4xen_determine_rate,
	.get_parent = &omap2_init_dpll_parent,
};
#else
static const struct clk_ops dpll_m4xen_ck_ops = {};
#endif

#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
	defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
	defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static const struct clk_ops dpll_core_ck_ops = {
	.recalc_rate = &omap3_dpll_recalc,
	.get_parent = &omap2_init_dpll_parent,
};

static const struct clk_ops dpll_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.recalc_rate = &omap3_dpll_recalc,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.get_parent = &omap2_init_dpll_parent,
};

static const struct clk_ops dpll_no_gate_ck_ops = {
	.recalc_rate = &omap3_dpll_recalc,
	.get_parent = &omap2_init_dpll_parent,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
};
#else
static const struct clk_ops dpll_core_ck_ops = {};
static const struct clk_ops dpll_ck_ops = {};
static const struct clk_ops dpll_no_gate_ck_ops = {};
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
#endif

#ifdef CONFIG_ARCH_OMAP2
static const struct clk_ops omap2_dpll_core_ck_ops = {
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap2_dpllcore_recalc,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap2_reprogram_dpllcore,
};
#else
static const struct clk_ops omap2_dpll_core_ck_ops = {};
#endif

#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_core_ck_ops = {
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.round_rate = &omap2_dpll_round_rate,
};
#else
static const struct clk_ops omap3_dpll_core_ck_ops = {};
#endif

#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};

static const struct clk_ops omap3_dpll5_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_dpll5_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};

static const struct clk_ops omap3_dpll_per_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_dpll4_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};
#endif

static const struct clk_ops dpll_x2_ck_ops = {
	.recalc_rate = &omap3_clkoutx2_recalc,
};

/**
 * _register_dpll - low level registration of a DPLL clock
 * @hw: hardware clock definition for the clock
 * @node: device node for the clock
 *
 * Finalizes the DPLL registration process. If a failure occurs (clk-ref or
 * clk-bypass is missing), the clock is added to the retry list and the
 * initialization is retried at a later stage.
 */
static void __init _register_dpll(struct clk_hw *hw,
				  struct device_node *node)
{
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *dd = clk_hw->dpll_data;
	struct clk *clk;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_debug("clk-ref missing for %s, retry later\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_ref = __clk_get_hw(clk);

	clk = of_clk_get(node, 1);

	if (IS_ERR(clk)) {
		pr_debug("clk-bypass missing for %s, retry later\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_bypass = __clk_get_hw(clk);

	/* register the clock */
	clk = clk_register(NULL, &clk_hw->hw);

	if (!IS_ERR(clk)) {
		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(clk_hw->hw.init->parent_names);
		kfree(clk_hw->hw.init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(clk_hw->hw.init->parent_names);
	kfree(clk_hw->hw.init);
	kfree(clk_hw);
}

#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
static void __iomem *_get_reg(u8 module, u16 offset)
{
	u32 reg;
	struct clk_omap_reg *reg_setup;

	reg_setup = (struct clk_omap_reg *)&reg;

	reg_setup->index = module;
	reg_setup->offset = offset;

	return (void __iomem *)reg;
}

struct clk *ti_clk_register_dpll(struct ti_clk *setup)
{
	struct clk_hw_omap *clk_hw;
	struct clk_init_data init = { NULL };
	struct dpll_data *dd;
	struct clk *clk;
	struct ti_clk_dpll *dpll;
	const struct clk_ops *ops = &omap3_dpll_ck_ops;
	struct clk *clk_ref;
	struct clk *clk_bypass;

	dpll = setup->data;

	if (dpll->num_parents < 2)
		return ERR_PTR(-EINVAL);

	clk_ref = clk_get_sys(NULL, dpll->parents[0]);
	clk_bypass = clk_get_sys(NULL, dpll->parents[1]);

	if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
		return ERR_PTR(-EAGAIN);

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!dd || !clk_hw) {
		clk = ERR_PTR(-ENOMEM);
		goto cleanup;
	}

	clk_hw->dpll_data = dd;
	clk_hw->ops = &clkhwops_omap3_dpll;
	clk_hw->hw.init = &init;
	clk_hw->flags = MEMMAP_ADDRESSING;

	init.name = setup->name;
	init.ops = ops;

	init.num_parents = dpll->num_parents;
	init.parent_names = dpll->parents;

	dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
	dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
	dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
	dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);

	dd->modes = dpll->modes;
	dd->div1_mask = dpll->div1_mask;
	dd->idlest_mask = dpll->idlest_mask;
	dd->mult_mask = dpll->mult_mask;
	dd->autoidle_mask = dpll->autoidle_mask;
	dd->enable_mask = dpll->enable_mask;
	dd->sddiv_mask = dpll->sddiv_mask;
	dd->dco_mask = dpll->dco_mask;
	dd->max_divider = dpll->max_divider;
	dd->min_divider = dpll->min_divider;
	dd->max_multiplier = dpll->max_multiplier;
	dd->auto_recal_bit = dpll->auto_recal_bit;
	dd->recal_en_bit = dpll->recal_en_bit;
	dd->recal_st_bit = dpll->recal_st_bit;

	dd->clk_ref = __clk_get_hw(clk_ref);
	dd->clk_bypass = __clk_get_hw(clk_bypass);

	if (dpll->flags & CLKF_CORE)
		ops = &omap3_dpll_core_ck_ops;

	if (dpll->flags & CLKF_PER)
		ops = &omap3_dpll_per_ck_ops;

	if (dpll->flags & CLKF_J_TYPE)
		dd->flags |= DPLL_J_TYPE;

	clk = clk_register(NULL, &clk_hw->hw);

	if (!IS_ERR(clk))
		return clk;

cleanup:
	kfree(dd);
	kfree(clk_hw);
	return clk;
}
#endif

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
	defined(CONFIG_SOC_AM43XX)
/**
 * _register_dpll_x2 - Registers a DPLLx2 clock
 * @node: device node for this clock
 * @ops: clk_ops for this clock
 * @hw_ops: clk_hw_ops for this clock
 *
 * Initializes a DPLL x 2 clock from device tree data.
 */
static void _register_dpll_x2(struct device_node *node,
			      const struct clk_ops *ops,
			      const struct clk_hw_omap_ops *hw_ops)
{
	struct clk *clk;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *clk_hw;
	const char *name = node->name;
	const char *parent_name;

	parent_name = of_clk_get_parent_name(node, 0);
	if (!parent_name) {
		pr_err("%s must have parent\n", node->name);
		return;
	}

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->ops = hw_ops;
	clk_hw->hw.init = &init;

	init.name = name;
	init.ops = ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* register the clock */
	clk = clk_register(NULL, &clk_hw->hw);

	if (IS_ERR(clk)) {
		kfree(clk_hw);
	} else {
		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
	}
}
#endif

/**
 * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
 * @node: device node containing the DPLL info
 * @ops: ops for the DPLL
 * @ddt: DPLL data template to use
 *
 * Initializes a DPLL clock from device tree data.
 */
static void __init of_ti_dpll_setup(struct device_node *node,
				    const struct clk_ops *ops,
				    const struct dpll_data *ddt)
{
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char **parent_names = NULL;
	struct dpll_data *dd = NULL;
	u8 dpll_mode = 0;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);
	if (!dd || !clk_hw || !init)
		goto cleanup;

	memcpy(dd, ddt, sizeof(*dd));

	clk_hw->dpll_data = dd;
	clk_hw->ops = &clkhwops_omap3_dpll;
	clk_hw->hw.init = init;
	clk_hw->flags = MEMMAP_ADDRESSING;

	init->name = node->name;
	init->ops = ops;

	init->num_parents = of_clk_get_parent_count(node);
	if (!init->num_parents) {
		pr_err("%s must have parent(s)\n", node->name);
		goto cleanup;
	}

	parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
	if (!parent_names)
		goto cleanup;

	of_clk_parent_fill(node, parent_names, init->num_parents);

	init->parent_names = parent_names;

	dd->control_reg = ti_clk_get_reg_addr(node, 0);

	/*
	 * Special case for OMAP2 DPLLs: the register order is different
	 * because idlest_reg is missing, and clkhwops differs as well.
	 * This is detected from the missing idlest_mask.
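	 *
	 * For example (mirroring the register reads below), OMAP2-style
	 * nodes provide only the control and mult-div1 registers at "reg"
	 * indices 0 and 1, while other DPLLs provide control, idlest and
	 * mult-div1 at indices 0-2, plus autoidle at index 3 when
	 * autoidle_mask is set.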
	 */
	if (!dd->idlest_mask) {
		dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1);
#ifdef CONFIG_ARCH_OMAP2
		clk_hw->ops = &clkhwops_omap2xxx_dpll;
		omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
#endif
	} else {
		dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
		if (IS_ERR(dd->idlest_reg))
			goto cleanup;

		dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
	}

	if (IS_ERR(dd->control_reg) || IS_ERR(dd->mult_div1_reg))
		goto cleanup;

	if (dd->autoidle_mask) {
		dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
		if (IS_ERR(dd->autoidle_reg))
			goto cleanup;
	}

	if (of_property_read_bool(node, "ti,low-power-stop"))
		dpll_mode |= 1 << DPLL_LOW_POWER_STOP;

	if (of_property_read_bool(node, "ti,low-power-bypass"))
		dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;

	if (of_property_read_bool(node, "ti,lock"))
		dpll_mode |= 1 << DPLL_LOCKED;

	if (dpll_mode)
		dd->modes = dpll_mode;

	_register_dpll(&clk_hw->hw, node);
	return;

cleanup:
	kfree(dd);
	kfree(parent_names);
	kfree(init);
	kfree(clk_hw);
}

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
}
CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
	       of_ti_omap4_dpll_x2_setup);
#endif

#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
}
CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
	       of_ti_am3_dpll_x2_setup);
#endif

#ifdef CONFIG_ARCH_OMAP3
static void __init of_ti_omap3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	if ((of_machine_is_compatible("ti,omap3630") ||
	     of_machine_is_compatible("ti,omap36xx")) &&
	    !strcmp(node->name, "dpll5_ck"))
		of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
	else
		of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
	       of_ti_omap3_dpll_setup);

static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 16,
		.div1_mask = 0x7f << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
	};

	of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
	       of_ti_omap3_core_dpll_setup);

static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf00000,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
	       of_ti_omap3_per_dpll_setup);

static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 128,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.dco_mask = 0xe << 20,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
	       of_ti_omap3_per_jtype_dpll_setup);
#endif

static void __init of_ti_omap4_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
	       of_ti_omap4_dpll_setup);

static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.dcc_mask = BIT(22),
		.dcc_rate = 1400000000, /* DCC beyond 1.4GHz */
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
	       of_ti_omap5_mpu_dpll_setup);

static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
	       of_ti_omap4_core_dpll_setup);

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.m4xen_mask = 0x800,
		.lpmode_mask = 1 << 10,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
	       of_ti_omap4_m4xen_dpll_setup);

static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0xff,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
	       of_ti_omap4_jtype_dpll_setup);
#endif

static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
	       of_ti_am3_no_gate_dpll_setup);

static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 2,
		.flags = DPLL_J_TYPE,
		.max_rate = 2000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
	       of_ti_am3_jtype_dpll_setup);

static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 2000000000,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
	       "ti,am3-dpll-no-gate-j-type-clock",
	       of_ti_am3_no_gate_jtype_dpll_setup);

static void __init of_ti_am3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);

static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
	       of_ti_am3_core_dpll_setup);

static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.enable_mask = 0x3,
		.mult_mask = 0x3ff << 12,
		.div1_mask = 0xf << 8,
		.max_divider = 16,
		.min_divider = 1,
	};

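	/*
	 * No idlest_mask in this template, so of_ti_dpll_setup() takes the
	 * OMAP2 register layout path above (control and mult-div1 only).
	 */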
	of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
	       of_ti_omap2_core_dpll_setup);
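
/*
 * Illustrative device tree usage for one of the compatibles declared above;
 * a sketch only, with hypothetical node name, parent phandles and register
 * offsets. The two "clocks" entries are consumed by _register_dpll() as
 * clk-ref (index 0) and clk-bypass (index 1), and the "reg" entries follow
 * the index order read by of_ti_dpll_setup(): 0 = control, 1 = idlest,
 * 2 = mult-div1, 3 = autoidle (only when the template sets autoidle_mask).
 *
 *	dpll_example_ck: dpll_example_ck@160 {
 *		#clock-cells = <0>;
 *		compatible = "ti,omap4-dpll-clock";
 *		clocks = <&sys_clkin_ck>, <&dpll_example_byp_mux>;
 *		reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>;
 *	};
 */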