// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/clk/tegra/clk-emc.c
 *
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Author:
 *	Mikko Perttunen <mperttunen@nvidia.com>
 */

#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/string.h>

#include <soc/tegra/fuse.h>

#include "clk.h"

#define CLK_SOURCE_EMC 0x19c

#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT 0
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK 0xff
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK) << \
					      CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT)

#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT 29
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK 0x7
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK) << \
					  CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)

static const char * const emc_parent_clk_names[] = {
	"pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud",
	"pll_c2", "pll_c3", "pll_c_ud"
};

/*
 * List of clock sources for various parents the EMC clock can have.
 * When we change the timing to a timing with a parent that has the same
 * clock source as the current parent, we must first change to a backup
 * timing that has a different clock source.
 */

#define EMC_SRC_PLL_M 0
#define EMC_SRC_PLL_C 1
#define EMC_SRC_PLL_P 2
#define EMC_SRC_CLK_M 3
#define EMC_SRC_PLL_C2 4
#define EMC_SRC_PLL_C3 5

static const char emc_parent_clk_sources[] = {
	EMC_SRC_PLL_M, EMC_SRC_PLL_C, EMC_SRC_PLL_P, EMC_SRC_CLK_M,
	EMC_SRC_PLL_M, EMC_SRC_PLL_C2, EMC_SRC_PLL_C3, EMC_SRC_PLL_C
};

struct emc_timing {
	unsigned long rate, parent_rate;
	u8 parent_index;
	struct clk *parent;
	u32 ram_code;
};

struct tegra_clk_emc {
	struct clk_hw hw;
	void __iomem *clk_regs;
	struct clk *prev_parent;
	bool changing_timing;

	struct device_node *emc_node;
	struct tegra_emc *emc;

	int num_timings;
	struct emc_timing *timings;
	spinlock_t *lock;

	tegra124_emc_prepare_timing_change_cb *prepare_timing_change;
	tegra124_emc_complete_timing_change_cb *complete_timing_change;
};

/* Common clock framework callback implementations */

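/*
 * Read the current EMC rate back from the CAR register. The divisor field
 * appears to use the usual Tegra U7.1 encoding (assumption, not stated in
 * this file): the effective divide ratio is (div + 2) / 2, so for example
 * a parent at 800 MHz with div == 2 gives 800 MHz / (2 + 2) * 2 = 400 MHz.
 */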
static unsigned long emc_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct tegra_clk_emc *tegra;
	u32 val, div;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	/*
	 * CCF wrongly assumes that the parent won't change during set_rate,
	 * so get the parent rate explicitly.
	 */
	parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw));

	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);
	div = val & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK;

	return parent_rate / (div + 2) * 2;
}

/*
 * Rounds up unless no higher rate exists, in which case down. This way is
 * safer since things have EMC rate floors. Also don't touch parent_rate
 * since we don't want the CCF to play with our parent clocks.
 */
static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct tegra_clk_emc *tegra;
	u8 ram_code = tegra_read_ram_code();
	struct emc_timing *timing = NULL;
	int i, k, t;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	for (k = 0; k < tegra->num_timings; k++) {
		if (tegra->timings[k].ram_code == ram_code)
			break;
	}

	for (t = k; t < tegra->num_timings; t++) {
		if (tegra->timings[t].ram_code != ram_code)
			break;
	}

	for (i = k; i < t; i++) {
		timing = tegra->timings + i;

		if (timing->rate < req->rate && i != t - 1)
			continue;

		if (timing->rate > req->max_rate) {
			i = max(i, k + 1);
			req->rate = tegra->timings[i - 1].rate;
			return 0;
		}

		if (timing->rate < req->min_rate)
			continue;

		req->rate = timing->rate;
		return 0;
	}

	if (timing) {
		req->rate = timing->rate;
		return 0;
	}

	req->rate = clk_hw_get_rate(hw);
	return 0;
}

static u8 emc_get_parent(struct clk_hw *hw)
{
	struct tegra_clk_emc *tegra;
	u32 val;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);

	return (val >> CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)
		& CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK;
}

static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
{
	struct platform_device *pdev;

	if (tegra->emc)
		return tegra->emc;

	if (!tegra->prepare_timing_change || !tegra->complete_timing_change)
		return NULL;

	if (!tegra->emc_node)
		return NULL;

	pdev = of_find_device_by_node(tegra->emc_node);
	if (!pdev) {
		pr_err("%s: could not get external memory controller\n",
		       __func__);
		return NULL;
	}

	of_node_put(tegra->emc_node);
	tegra->emc_node = NULL;

	tegra->emc = platform_get_drvdata(pdev);
	if (!tegra->emc) {
		pr_err("%s: cannot find EMC driver\n", __func__);
		return NULL;
	}

	return tegra->emc;
}

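/*
 * Switch to a new timing: bring the target parent to the required rate and
 * enable it, let the EMC driver prepare the DRAM timing change, update the
 * CAR mux and divisor under the CAR lock, complete the timing change, and
 * finally reparent the clock and drop the reference on the previous parent.
 */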
static int emc_set_timing(struct tegra_clk_emc *tegra,
			  struct emc_timing *timing)
{
	int err;
	u8 div;
	u32 car_value;
	unsigned long flags = 0;
	struct tegra_emc *emc = emc_ensure_emc_driver(tegra);

	if (!emc)
		return -ENOENT;

	pr_debug("going to rate %ld prate %ld p %s\n", timing->rate,
		 timing->parent_rate, __clk_get_name(timing->parent));

	if (emc_get_parent(&tegra->hw) == timing->parent_index &&
	    clk_get_rate(timing->parent) != timing->parent_rate) {
		WARN_ONCE(1, "parent %s rate mismatch %lu %lu\n",
			  __clk_get_name(timing->parent),
			  clk_get_rate(timing->parent),
			  timing->parent_rate);
		return -EINVAL;
	}

	tegra->changing_timing = true;

	err = clk_set_rate(timing->parent, timing->parent_rate);
	if (err) {
		pr_err("cannot change parent %s rate to %ld: %d\n",
		       __clk_get_name(timing->parent), timing->parent_rate,
		       err);

		return err;
	}

	err = clk_prepare_enable(timing->parent);
	if (err) {
		pr_err("cannot enable parent clock: %d\n", err);
		return err;
	}

	div = timing->parent_rate / (timing->rate / 2) - 2;

	err = tegra->prepare_timing_change(emc, timing->rate);
	if (err) {
		clk_disable_unprepare(timing->parent);
		return err;
	}

	spin_lock_irqsave(tegra->lock, flags);

	car_value = readl(tegra->clk_regs + CLK_SOURCE_EMC);

	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_SRC(~0);
	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_SRC(timing->parent_index);

	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(~0);
	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(div);

	writel(car_value, tegra->clk_regs + CLK_SOURCE_EMC);

	spin_unlock_irqrestore(tegra->lock, flags);

	tegra->complete_timing_change(emc, timing->rate);

	clk_hw_reparent(&tegra->hw, __clk_get_hw(timing->parent));
	clk_disable_unprepare(tegra->prev_parent);

	tegra->prev_parent = timing->parent;
	tegra->changing_timing = false;

	return 0;
}

/*
 * Get backup timing to use as an intermediate step when a change between
 * two timings with the same clock source has been requested. First try to
 * find a timing with a higher clock rate to avoid a rate below any set rate
 * floors. If that is not possible, find a lower rate.
 */
static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra,
					    int timing_index)
{
	int i;
	u32 ram_code = tegra_read_ram_code();
	struct emc_timing *timing;

	for (i = timing_index + 1; i < tegra->num_timings; i++) {
		timing = tegra->timings + i;
		if (timing->ram_code != ram_code)
			break;

		if (emc_parent_clk_sources[timing->parent_index] !=
		    emc_parent_clk_sources[
		      tegra->timings[timing_index].parent_index])
			return timing;
	}

	for (i = timing_index - 1; i >= 0; --i) {
		timing = tegra->timings + i;
		if (timing->ram_code != ram_code)
			break;

		if (emc_parent_clk_sources[timing->parent_index] !=
		    emc_parent_clk_sources[
		      tegra->timings[timing_index].parent_index])
			return timing;
	}

	return NULL;
}

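/*
 * clk_ops .set_rate callback: look up the requested rate in the timing
 * table for the current RAM code and apply it, going through a backup
 * timing first when the new timing reuses the current clock source but
 * needs a different parent rate.
 */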
static int emc_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct tegra_clk_emc *tegra;
	struct emc_timing *timing = NULL;
	int i, err;
	u32 ram_code = tegra_read_ram_code();

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	if (clk_hw_get_rate(hw) == rate)
		return 0;

	/*
	 * When emc_set_timing changes the parent rate, CCF will propagate
	 * that downward to us, so ignore any set_rate calls while a rate
	 * change is already going on.
	 */
	if (tegra->changing_timing)
		return 0;

	for (i = 0; i < tegra->num_timings; i++) {
		if (tegra->timings[i].rate == rate &&
		    tegra->timings[i].ram_code == ram_code) {
			timing = tegra->timings + i;
			break;
		}
	}

	if (!timing) {
		pr_err("cannot switch to rate %ld without emc table\n", rate);
		return -EINVAL;
	}

	if (emc_parent_clk_sources[emc_get_parent(hw)] ==
	    emc_parent_clk_sources[timing->parent_index] &&
	    clk_get_rate(timing->parent) != timing->parent_rate) {
		/*
		 * Parent clock source not changed but parent rate has changed,
		 * need to temporarily switch to another parent
		 */

		struct emc_timing *backup_timing;

		backup_timing = get_backup_timing(tegra, i);
		if (!backup_timing) {
			pr_err("cannot find backup timing\n");
			return -EINVAL;
		}

		pr_debug("using %ld as backup rate when going to %ld\n",
			 backup_timing->rate, rate);

		err = emc_set_timing(tegra, backup_timing);
		if (err) {
			pr_err("cannot set backup timing: %d\n", err);
			return err;
		}
	}

	return emc_set_timing(tegra, timing);
}

/* Initialization and deinitialization */

static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
				   struct emc_timing *timing,
				   struct device_node *node)
{
	int err, i;
	u32 tmp;

	err = of_property_read_u32(node, "clock-frequency", &tmp);
	if (err) {
		pr_err("timing %pOF: failed to read rate\n", node);
		return err;
	}

	timing->rate = tmp;

	err = of_property_read_u32(node, "nvidia,parent-clock-frequency", &tmp);
	if (err) {
		pr_err("timing %pOF: failed to read parent rate\n", node);
		return err;
	}

	timing->parent_rate = tmp;

	timing->parent = of_clk_get_by_name(node, "emc-parent");
	if (IS_ERR(timing->parent)) {
		pr_err("timing %pOF: failed to get parent clock\n", node);
		return PTR_ERR(timing->parent);
	}

	timing->parent_index = 0xff;
	i = match_string(emc_parent_clk_names, ARRAY_SIZE(emc_parent_clk_names),
			 __clk_get_name(timing->parent));
	if (i < 0) {
		pr_err("timing %pOF: %s is not a valid parent\n",
		       node, __clk_get_name(timing->parent));
		clk_put(timing->parent);
		return -EINVAL;
	}

	timing->parent_index = i;
	return 0;
}

static int cmp_timings(const void *_a, const void *_b)
{
	const struct emc_timing *a = _a;
	const struct emc_timing *b = _b;

	if (a->rate < b->rate)
		return -1;
	else if (a->rate == b->rate)
		return 0;
	else
		return 1;
}

static int load_timings_from_dt(struct tegra_clk_emc *tegra,
				struct device_node *node,
				u32 ram_code)
{
	struct emc_timing *timings_ptr;
	struct device_node *child;
	int child_count = of_get_child_count(node);
	int i = 0, err;
	size_t size;

	size = (tegra->num_timings + child_count) * sizeof(struct emc_timing);

	tegra->timings = krealloc(tegra->timings, size, GFP_KERNEL);
	if (!tegra->timings)
		return -ENOMEM;

	timings_ptr = tegra->timings + tegra->num_timings;
	tegra->num_timings += child_count;

	for_each_child_of_node(node, child) {
		struct emc_timing *timing = timings_ptr + (i++);

		err = load_one_timing_from_dt(tegra, timing, child);
		if (err) {
			of_node_put(child);
			return err;
		}

		timing->ram_code = ram_code;
	}

	sort(timings_ptr, child_count, sizeof(struct emc_timing),
	     cmp_timings, NULL);

	return 0;
}

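/*
 * Note that there is no .set_parent callback: reparenting happens only as
 * part of a timing change, via clk_hw_reparent() in emc_set_timing().
 */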
static const struct clk_ops tegra_clk_emc_ops = {
	.recalc_rate = emc_recalc_rate,
	.determine_rate = emc_determine_rate,
	.set_rate = emc_set_rate,
	.get_parent = emc_get_parent,
};

struct clk *tegra124_clk_register_emc(void __iomem *base, struct device_node *np,
				      spinlock_t *lock)
{
	struct tegra_clk_emc *tegra;
	struct clk_init_data init;
	struct device_node *node;
	u32 node_ram_code;
	struct clk *clk;
	int err;

	tegra = kcalloc(1, sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return ERR_PTR(-ENOMEM);

	tegra->clk_regs = base;
	tegra->lock = lock;

	tegra->num_timings = 0;

	for_each_child_of_node(np, node) {
		err = of_property_read_u32(node, "nvidia,ram-code",
					   &node_ram_code);
		if (err)
			continue;

		/*
		 * Store timings for all ram codes as we cannot read the
		 * fuses until the apbmisc driver is loaded.
		 */
		err = load_timings_from_dt(tegra, node, node_ram_code);
		if (err) {
			of_node_put(node);
			return ERR_PTR(err);
		}
	}

	if (tegra->num_timings == 0)
		pr_warn("%s: no memory timings registered\n", __func__);

	tegra->emc_node = of_parse_phandle(np,
			"nvidia,external-memory-controller", 0);
	if (!tegra->emc_node)
		pr_warn("%s: couldn't find node for EMC driver\n", __func__);

	init.name = "emc";
	init.ops = &tegra_clk_emc_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = emc_parent_clk_names;
	init.num_parents = ARRAY_SIZE(emc_parent_clk_names);

	tegra->hw.init = &init;

	clk = clk_register(NULL, &tegra->hw);
	if (IS_ERR(clk))
		return clk;

	tegra->prev_parent = clk_hw_get_parent_by_index(
		&tegra->hw, emc_get_parent(&tegra->hw))->clk;
	tegra->changing_timing = false;

	/* Allow debugging tools to see the EMC clock */
	clk_register_clkdev(clk, "emc", "tegra-clk-debug");

	return clk;
}

void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
				    tegra124_emc_complete_timing_change_cb *complete_cb)
{
	struct clk *clk = __clk_lookup("emc");
	struct tegra_clk_emc *tegra;
	struct clk_hw *hw;

	if (clk) {
		hw = __clk_get_hw(clk);
		tegra = container_of(hw, struct tegra_clk_emc, hw);

		tegra->prepare_timing_change = prep_cb;
		tegra->complete_timing_change = complete_cb;
	}
}
EXPORT_SYMBOL_GPL(tegra124_clk_set_emc_callbacks);

bool tegra124_clk_emc_driver_available(struct clk_hw *hw)
{
	struct tegra_clk_emc *tegra = container_of(hw, struct tegra_clk_emc, hw);

	return tegra->prepare_timing_change && tegra->complete_timing_change;
}
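
/*
 * Typical bring-up sequence (illustrative sketch; the EMC-driver-side
 * function names below are assumptions, not definitions from this file):
 *
 *	clk = tegra124_clk_register_emc(clk_base, np, &emc_lock);
 *	...
 *	tegra124_clk_set_emc_callbacks(emc_prepare_timing_change,
 *				       emc_complete_timing_change);
 *
 * Until both callbacks are registered, emc_ensure_emc_driver() returns
 * NULL and rate changes fail with -ENOENT;
 * tegra124_clk_emc_driver_available() lets callers detect this.
 */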