/*
 * OMAP clkctrl clock support
 *
 * Copyright (C) 2017 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>
#include <linux/timekeeping.h>
#include "clock.h"

#define NO_IDLEST			0x1

#define OMAP4_MODULEMODE_MASK		0x3

#define MODULEMODE_HWCTRL		0x1
#define MODULEMODE_SWCTRL		0x2

#define OMAP4_IDLEST_MASK		(0x3 << 16)
#define OMAP4_IDLEST_SHIFT		16

#define CLKCTRL_IDLEST_FUNCTIONAL	0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE	0x2
#define CLKCTRL_IDLEST_DISABLED		0x3

/* These timeouts are in us */
#define OMAP4_MAX_MODULE_READY_TIME	2000
#define OMAP4_MAX_MODULE_DISABLE_TIME	5000

static bool _early_timeout = true;

struct omap_clkctrl_provider {
	void __iomem *base;
	struct list_head clocks;
	char *clkdm_name;
};

struct omap_clkctrl_clk {
	struct clk_hw *clk;
	u16 reg_offset;
	int bit_offset;
	struct list_head node;
};

union omap4_timeout {
	u32 cycles;
	ktime_t start;
};

static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
	{ 0 },
};

static u32 _omap4_idlest(u32 val)
{
	val &= OMAP4_IDLEST_MASK;
	val >>= OMAP4_IDLEST_SHIFT;

	return val;
}

static bool _omap4_is_idle(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_DISABLED;
}

static bool _omap4_is_ready(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_FUNCTIONAL ||
	       val == CLKCTRL_IDLEST_INTERFACE_IDLE;
}
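
/*
 * Worked example for the helpers above (illustrative register value, not
 * taken from hardware documentation): a CM_*_CLKCTRL readback of 0x00030000
 * decodes via _omap4_idlest() to 0x3, i.e. CLKCTRL_IDLEST_DISABLED, so
 * _omap4_is_idle() returns true and _omap4_is_ready() returns false.
 */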

static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
{
	/*
	 * There are two special cases where ktime_to_ns() can't be
	 * used to track the timeouts. First one is during early boot
	 * when the timers haven't been initialized yet. The second
	 * one is during suspend-resume cycle while timekeeping is
	 * being suspended / resumed. Clocksource for the system
	 * can be from a timer that requires pm_runtime access, which
	 * will eventually bring us here with timekeeping_suspended,
	 * during both suspend entry and resume paths. This happens
	 * at least on am43xx platform.
	 */
	if (unlikely(_early_timeout || timekeeping_suspended)) {
		if (time->cycles++ < timeout) {
			udelay(1);
			return false;
		}
	} else {
		if (!ktime_to_ns(time->start)) {
			time->start = ktime_get();
			return false;
		}

		if (ktime_us_delta(ktime_get(), time->start) < timeout) {
			cpu_relax();
			return false;
		}
	}

	return true;
}

static int __init _omap4_disable_early_timeout(void)
{
	_early_timeout = false;

	return 0;
}
arch_initcall(_omap4_disable_early_timeout);

static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	int ret;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		return 0;

	if (clk->clkdm) {
		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, ret);
			return ret;
		}
	}

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;
	val |= clk->enable_bit;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (clk->flags & NO_IDLEST)
		return 0;

	/* Wait until module is enabled */
	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
			pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
			return -EBUSY;
		}
	}

	return 0;
}

static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		return;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (clk->flags & NO_IDLEST)
		goto exit;

	/* Wait until module is disabled */
	while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout,
				      OMAP4_MAX_MODULE_DISABLE_TIME)) {
			pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
			break;
		}
	}

exit:
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	if (val & clk->enable_bit)
		return 1;

	return 0;
}

static const struct clk_ops omap4_clkctrl_clk_ops = {
	.enable		= _omap4_clkctrl_clk_enable,
	.disable	= _omap4_clkctrl_clk_disable,
	.is_enabled	= _omap4_clkctrl_clk_is_enabled,
	.init		= omap2_init_clk_clkdm,
};

static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
					      void *data)
{
	struct omap_clkctrl_provider *provider = data;
	struct omap_clkctrl_clk *entry;
	bool found = false;

	if (clkspec->args_count != 2)
		return ERR_PTR(-EINVAL);

	pr_debug("%s: looking for %x:%x\n", __func__,
		 clkspec->args[0], clkspec->args[1]);

	list_for_each_entry(entry, &provider->clocks, node) {
		if (entry->reg_offset == clkspec->args[0] &&
		    entry->bit_offset == clkspec->args[1]) {
			found = true;
			break;
		}
	}

	if (!found)
		return ERR_PTR(-EINVAL);

	return entry->clk;
}
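
/*
 * Illustrative two-cell clock specifier as resolved by the xlate callback
 * above (node name and numbers are made-up examples, not from a real dts):
 * the first cell is the clkctrl register offset within the provider, the
 * second is the bit offset of the optional clock in that register, e.g.
 *
 *	clocks = <&l4_per_clkctrl 0x28 8>;
 *
 * looks up the entry registered with reg_offset 0x28 and bit_offset 8.
 */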

static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
			 struct device_node *node, struct clk_hw *clk_hw,
			 u16 offset, u8 bit, const char * const *parents,
			 int num_parents, const struct clk_ops *ops)
{
	struct clk_init_data init = { NULL };
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	int ret = 0;

	if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
		init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
				      node->parent, node, offset,
				      bit);
	else
		init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", node,
				      offset, bit);
	clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
	if (!init.name || !clkctrl_clk) {
		ret = -ENOMEM;
		goto cleanup;
	}

	clk_hw->init = &init;
	init.parent_names = parents;
	init.num_parents = num_parents;
	init.ops = ops;
	init.flags = CLK_IS_BASIC;

	clk = ti_clk_register(NULL, clk_hw, init.name);
	if (IS_ERR_OR_NULL(clk)) {
		ret = -EINVAL;
		goto cleanup;
	}

	clkctrl_clk->reg_offset = offset;
	clkctrl_clk->bit_offset = bit;
	clkctrl_clk->clk = clk_hw;

	list_add(&clkctrl_clk->node, &provider->clocks);

	return 0;

cleanup:
	kfree(init.name);
	kfree(clkctrl_clk);
	return ret;
}

static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
		       struct device_node *node, u16 offset,
		       const struct omap_clkctrl_bit_data *data,
		       void __iomem *reg)
{
	struct clk_hw_omap *clk_hw;

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->enable_bit = data->bit;
	clk_hw->enable_reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
				     data->bit, data->parents, 1,
				     &omap_gate_clk_ops))
		kfree(clk_hw);
}
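
/*
 * Note on the mux mask computation below (worked example, illustrative
 * numbers): with three parents and CLK_MUX_INDEX_ONE unset, mask starts
 * as 3, is decremented to 2, and (1 << fls(2)) - 1 = 0x3, i.e. a two-bit
 * field wide enough for parent indices 0..2.
 */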

static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_mux *mux;
	int num_parents = 0;
	const char * const *pname;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return;

	pname = data->parents;
	while (*pname) {
		num_parents++;
		pname++;
	}

	mux->mask = num_parents;
	if (!(mux->flags & CLK_MUX_INDEX_ONE))
		mux->mask--;

	mux->mask = (1 << fls(mux->mask)) - 1;

	mux->shift = data->bit;
	mux->reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
				     data->bit, data->parents, num_parents,
				     &ti_clk_mux_ops))
		kfree(mux);
}

static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_divider *div;
	const struct omap_clkctrl_div_data *div_data = data->data;
	u8 div_flags = 0;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return;

	div->reg.ptr = reg;
	div->shift = data->bit;
	div->flags = div_data->flags;

	if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
		div_flags |= CLKF_INDEX_POWER_OF_TWO;

	if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
				      div_data->max_div, div_flags,
				      &div->width, &div->table)) {
		pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
		       node, offset, data->bit);
		kfree(div);
		return;
	}

	if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
				     data->bit, data->parents, 1,
				     &ti_clk_divider_ops))
		kfree(div);
}

static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
			  struct device_node *node,
			  const struct omap_clkctrl_reg_data *data,
			  void __iomem *reg)
{
	const struct omap_clkctrl_bit_data *bits = data->bit_data;

	if (!bits)
		return;

	while (bits->bit) {
		switch (bits->type) {
		case TI_CLK_GATE:
			_ti_clkctrl_setup_gate(provider, node, data->offset,
					       bits, reg);
			break;

		case TI_CLK_DIVIDER:
			_ti_clkctrl_setup_div(provider, node, data->offset,
					      bits, reg);
			break;

		case TI_CLK_MUX:
			_ti_clkctrl_setup_mux(provider, node, data->offset,
					      bits, reg);
			break;

		default:
			pr_err("%s: bad subclk type: %d\n", __func__,
			       bits->type);
			return;
		}
		bits++;
	}
}

static void __init _clkctrl_add_provider(void *data,
					 struct device_node *np)
{
	of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
}

static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
	struct omap_clkctrl_provider *provider;
	const struct omap_clkctrl_data *data = default_clkctrl_data;
	const struct omap_clkctrl_reg_data *reg_data;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *hw;
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	const __be32 *addrp;
	u32 addr;
	int ret;
	char *c;

	if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
	    !strcmp(node->name, "clk"))
		ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;

	addrp = of_get_address(node, 0, NULL, NULL);
	addr = (u32)of_translate_address(node, addrp);

#ifdef CONFIG_ARCH_OMAP4
	if (of_machine_is_compatible("ti,omap4"))
		data = omap4_clkctrl_data;
#endif
#ifdef CONFIG_SOC_OMAP5
	if (of_machine_is_compatible("ti,omap5"))
		data = omap5_clkctrl_data;
#endif
#ifdef CONFIG_SOC_DRA7XX
	if (of_machine_is_compatible("ti,dra7")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = dra7_clkctrl_compat_data;
		else
			data = dra7_clkctrl_data;
	}
#endif
#ifdef CONFIG_SOC_AM33XX
	if (of_machine_is_compatible("ti,am33xx")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am3_clkctrl_compat_data;
		else
			data = am3_clkctrl_data;
	}
#endif
#ifdef CONFIG_SOC_AM43XX
	if (of_machine_is_compatible("ti,am4372")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am4_clkctrl_compat_data;
		else
			data = am4_clkctrl_data;
	}

	if (of_machine_is_compatible("ti,am438x")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am438x_clkctrl_compat_data;
		else
			data = am438x_clkctrl_data;
	}
#endif
#ifdef CONFIG_SOC_TI81XX
	if (of_machine_is_compatible("ti,dm814"))
		data = dm814_clkctrl_data;

	if (of_machine_is_compatible("ti,dm816"))
		data = dm816_clkctrl_data;
#endif

	while (data->addr) {
		if (addr == data->addr)
			break;

		data++;
	}

	if (!data->addr) {
		pr_err("%pOF not found from clkctrl data.\n", node);
		return;
	}

	provider = kzalloc(sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return;

	provider->base = of_iomap(node, 0);
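
	/*
	 * Derive a default clockdomain name from the node names
	 * (illustrative examples, not taken from a real dts): in compat
	 * mode a parent node named "l4per_cm" yields "l4per_clkdm",
	 * otherwise a node named "l4ls-clkctrl" yields "l4ls_clkdm" once
	 * the dash is replaced below.
	 */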
	if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx",
						 node->parent);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _cm from end of parent
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
	} else {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _clkctrl from end of
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
	}

	strcat(provider->clkdm_name, "clkdm");

	/* Replace any dash from the clkdm name with underscore */
	c = provider->clkdm_name;

	while (*c) {
		if (*c == '-')
			*c = '_';
		c++;
	}

	INIT_LIST_HEAD(&provider->clocks);

	/* Generate clocks */
	reg_data = data->regs;

	while (reg_data->parent) {
		hw = kzalloc(sizeof(*hw), GFP_KERNEL);
		if (!hw)
			return;

		hw->enable_reg.ptr = provider->base + reg_data->offset;

		_ti_clkctrl_setup_subclks(provider, node, reg_data,
					  hw->enable_reg.ptr);

		if (reg_data->flags & CLKF_SW_SUP)
			hw->enable_bit = MODULEMODE_SWCTRL;
		if (reg_data->flags & CLKF_HW_SUP)
			hw->enable_bit = MODULEMODE_HWCTRL;
		if (reg_data->flags & CLKF_NO_IDLEST)
			hw->flags |= NO_IDLEST;

		if (reg_data->clkdm_name)
			hw->clkdm_name = reg_data->clkdm_name;
		else
			hw->clkdm_name = provider->clkdm_name;

		init.parent_names = &reg_data->parent;
		init.num_parents = 1;
		init.flags = 0;
		if (reg_data->flags & CLKF_SET_RATE_PARENT)
			init.flags |= CLK_SET_RATE_PARENT;
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
					      node->parent, node,
					      reg_data->offset, 0);
		else
			init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d",
					      node, reg_data->offset, 0);
		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
		if (!init.name || !clkctrl_clk)
			goto cleanup;

		init.ops = &omap4_clkctrl_clk_ops;
		hw->hw.init = &init;

		clk = ti_clk_register(NULL, &hw->hw, init.name);
		if (IS_ERR_OR_NULL(clk))
			goto cleanup;

		clkctrl_clk->reg_offset = reg_data->offset;
		clkctrl_clk->clk = &hw->hw;

		list_add(&clkctrl_clk->node, &provider->clocks);

		reg_data++;
	}

	ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
	if (ret == -EPROBE_DEFER)
		ti_clk_retry_init(node, provider, _clkctrl_add_provider);

	return;

cleanup:
	kfree(hw);
	kfree(init.name);
	kfree(clkctrl_clk);
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
	       _ti_omap4_clkctrl_setup);
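
/*
 * Illustrative provider node matched by the CLK_OF_DECLARE() above (name,
 * offset and size are made-up examples, not from a real dts); the driver
 * only requires the "ti,clkctrl" compatible and a two-cell clock specifier:
 *
 *	l4_per_clkctrl: clock@20 {
 *		compatible = "ti,clkctrl";
 *		reg = <0x20 0x1b0>;
 *		#clock-cells = <2>;
 *	};
 */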