// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/clk/tegra/clk-emc.c
 *
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Author:
 *	Mikko Perttunen <mperttunen@nvidia.com>
 */

#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/string.h>

#include <soc/tegra/fuse.h>
#include <soc/tegra/emc.h>

#include "clk.h"

#define CLK_SOURCE_EMC 0x19c

#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT 0
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK 0xff
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK) << \
					      CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT)

#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT 29
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK 0x7
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK) << \
					  CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)

static const char * const emc_parent_clk_names[] = {
	"pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud",
	"pll_c2", "pll_c3", "pll_c_ud"
};

/*
 * List of clock sources for various parents the EMC clock can have.
 * When we change the timing to a timing with a parent that has the same
 * clock source as the current parent, we must first change to a backup
 * timing that has a different clock source.
 */

#define EMC_SRC_PLL_M 0
#define EMC_SRC_PLL_C 1
#define EMC_SRC_PLL_P 2
#define EMC_SRC_CLK_M 3
#define EMC_SRC_PLL_C2 4
#define EMC_SRC_PLL_C3 5

static const char emc_parent_clk_sources[] = {
	EMC_SRC_PLL_M, EMC_SRC_PLL_C, EMC_SRC_PLL_P, EMC_SRC_CLK_M,
	EMC_SRC_PLL_M, EMC_SRC_PLL_C2, EMC_SRC_PLL_C3, EMC_SRC_PLL_C
};

struct emc_timing {
	unsigned long rate, parent_rate;
	u8 parent_index;
	struct clk *parent;
	u32 ram_code;
};

struct tegra_clk_emc {
	struct clk_hw hw;
	void __iomem *clk_regs;
	struct clk *prev_parent;
	bool changing_timing;

	struct device_node *emc_node;
	struct tegra_emc *emc;

	int num_timings;
	struct emc_timing *timings;
	spinlock_t *lock;
};

/* Common clock framework callback implementations */

static unsigned long emc_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct tegra_clk_emc *tegra;
	u32 val, div;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	/*
	 * CCF wrongly assumes that the parent won't change during set_rate,
	 * so get the parent rate explicitly.
	 */
	parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw));

	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);
	div = val & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK;

	return parent_rate / (div + 2) * 2;
}
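/*
 * Note on the divisor encoding (derived from the code in this file rather
 * than from documentation): the EMC_2X_CLK divisor field holds
 * 2 * parent_rate / rate - 2, which is why emc_recalc_rate() above computes
 * parent_rate / (div + 2) * 2 and emc_set_timing() below programs
 * parent_rate / (rate / 2) - 2.
 */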
/*
 * Rounds up unless no higher rate exists, in which case down. This way is
 * safer since things have EMC rate floors. Also don't touch parent_rate
 * since we don't want the CCF to play with our parent clocks.
 */
static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct tegra_clk_emc *tegra;
	u8 ram_code = tegra_read_ram_code();
	struct emc_timing *timing = NULL;
	int i, k, t;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	for (k = 0; k < tegra->num_timings; k++) {
		if (tegra->timings[k].ram_code == ram_code)
			break;
	}

	for (t = k; t < tegra->num_timings; t++) {
		if (tegra->timings[t].ram_code != ram_code)
			break;
	}

	for (i = k; i < t; i++) {
		timing = tegra->timings + i;

		if (timing->rate < req->rate && i != t - 1)
			continue;

		if (timing->rate > req->max_rate) {
			i = max(i, k + 1);
			req->rate = tegra->timings[i - 1].rate;
			return 0;
		}

		if (timing->rate < req->min_rate)
			continue;

		req->rate = timing->rate;
		return 0;
	}

	if (timing) {
		req->rate = timing->rate;
		return 0;
	}

	req->rate = clk_hw_get_rate(hw);
	return 0;
}

static u8 emc_get_parent(struct clk_hw *hw)
{
	struct tegra_clk_emc *tegra;
	u32 val;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);

	return (val >> CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)
		& CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK;
}

static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
{
	struct platform_device *pdev;

	if (tegra->emc)
		return tegra->emc;

	if (!tegra->emc_node)
		return NULL;

	pdev = of_find_device_by_node(tegra->emc_node);
	if (!pdev) {
		pr_err("%s: could not get external memory controller\n",
		       __func__);
		return NULL;
	}

	of_node_put(tegra->emc_node);
	tegra->emc_node = NULL;

	tegra->emc = platform_get_drvdata(pdev);
	if (!tegra->emc) {
		pr_err("%s: cannot find EMC driver\n", __func__);
		return NULL;
	}

	return tegra->emc;
}

static int emc_set_timing(struct tegra_clk_emc *tegra,
			  struct emc_timing *timing)
{
	int err;
	u8 div;
	u32 car_value;
	unsigned long flags = 0;
	struct tegra_emc *emc = emc_ensure_emc_driver(tegra);

	if (!emc)
		return -ENOENT;

	pr_debug("going to rate %ld prate %ld p %s\n", timing->rate,
		 timing->parent_rate, __clk_get_name(timing->parent));

	if (emc_get_parent(&tegra->hw) == timing->parent_index &&
	    clk_get_rate(timing->parent) != timing->parent_rate) {
		WARN_ONCE(1, "parent %s rate mismatch %lu %lu\n",
			  __clk_get_name(timing->parent),
			  clk_get_rate(timing->parent),
			  timing->parent_rate);
		return -EINVAL;
	}

	tegra->changing_timing = true;

	err = clk_set_rate(timing->parent, timing->parent_rate);
	if (err) {
		pr_err("cannot change parent %s rate to %ld: %d\n",
		       __clk_get_name(timing->parent), timing->parent_rate,
		       err);

		return err;
	}

	err = clk_prepare_enable(timing->parent);
	if (err) {
		pr_err("cannot enable parent clock: %d\n", err);
		return err;
	}

	div = timing->parent_rate / (timing->rate / 2) - 2;

	err = tegra_emc_prepare_timing_change(emc, timing->rate);
	if (err)
		return err;

	spin_lock_irqsave(tegra->lock, flags);

	car_value = readl(tegra->clk_regs + CLK_SOURCE_EMC);

	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_SRC(~0);
	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_SRC(timing->parent_index);

	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(~0);
	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(div);

	writel(car_value, tegra->clk_regs + CLK_SOURCE_EMC);

	spin_unlock_irqrestore(tegra->lock, flags);

	tegra_emc_complete_timing_change(emc, timing->rate);

	clk_hw_reparent(&tegra->hw, __clk_get_hw(timing->parent));
	clk_disable_unprepare(tegra->prev_parent);

	tegra->prev_parent = timing->parent;
	tegra->changing_timing = false;

	return 0;
}
/*
 * Get backup timing to use as an intermediate step when a change between
 * two timings with the same clock source has been requested. First try to
 * find a timing with a higher clock rate to avoid a rate below any set rate
 * floors. If that is not possible, find a lower rate.
 */
static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra,
					    int timing_index)
{
	int i;
	u32 ram_code = tegra_read_ram_code();
	struct emc_timing *timing;

	for (i = timing_index + 1; i < tegra->num_timings; i++) {
		timing = tegra->timings + i;
		if (timing->ram_code != ram_code)
			break;

		if (emc_parent_clk_sources[timing->parent_index] !=
		    emc_parent_clk_sources[
		      tegra->timings[timing_index].parent_index])
			return timing;
	}

	for (i = timing_index - 1; i >= 0; --i) {
		timing = tegra->timings + i;
		if (timing->ram_code != ram_code)
			break;

		if (emc_parent_clk_sources[timing->parent_index] !=
		    emc_parent_clk_sources[
		      tegra->timings[timing_index].parent_index])
			return timing;
	}

	return NULL;
}

static int emc_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct tegra_clk_emc *tegra;
	struct emc_timing *timing = NULL;
	int i, err;
	u32 ram_code = tegra_read_ram_code();

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	if (clk_hw_get_rate(hw) == rate)
		return 0;

	/*
	 * When emc_set_timing changes the parent rate, CCF will propagate
	 * that downward to us, so ignore any set_rate calls while a rate
	 * change is already going on.
	 */
	if (tegra->changing_timing)
		return 0;

	for (i = 0; i < tegra->num_timings; i++) {
		if (tegra->timings[i].rate == rate &&
		    tegra->timings[i].ram_code == ram_code) {
			timing = tegra->timings + i;
			break;
		}
	}

	if (!timing) {
		pr_err("cannot switch to rate %ld without emc table\n", rate);
		return -EINVAL;
	}

	if (emc_parent_clk_sources[emc_get_parent(hw)] ==
	    emc_parent_clk_sources[timing->parent_index] &&
	    clk_get_rate(timing->parent) != timing->parent_rate) {
		/*
		 * Parent clock source not changed but parent rate has changed,
		 * need to temporarily switch to another parent
		 */

		struct emc_timing *backup_timing;

		backup_timing = get_backup_timing(tegra, i);
		if (!backup_timing) {
			pr_err("cannot find backup timing\n");
			return -EINVAL;
		}

		pr_debug("using %ld as backup rate when going to %ld\n",
			 backup_timing->rate, rate);

		err = emc_set_timing(tegra, backup_timing);
		if (err) {
			pr_err("cannot set backup timing: %d\n", err);
			return err;
		}
	}

	return emc_set_timing(tegra, timing);
}
/* Initialization and deinitialization */

static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
				   struct emc_timing *timing,
				   struct device_node *node)
{
	int err, i;
	u32 tmp;

	err = of_property_read_u32(node, "clock-frequency", &tmp);
	if (err) {
		pr_err("timing %pOF: failed to read rate\n", node);
		return err;
	}

	timing->rate = tmp;

	err = of_property_read_u32(node, "nvidia,parent-clock-frequency", &tmp);
	if (err) {
		pr_err("timing %pOF: failed to read parent rate\n", node);
		return err;
	}

	timing->parent_rate = tmp;

	timing->parent = of_clk_get_by_name(node, "emc-parent");
	if (IS_ERR(timing->parent)) {
		pr_err("timing %pOF: failed to get parent clock\n", node);
		return PTR_ERR(timing->parent);
	}

	timing->parent_index = 0xff;
	i = match_string(emc_parent_clk_names, ARRAY_SIZE(emc_parent_clk_names),
			 __clk_get_name(timing->parent));
	if (i < 0) {
		pr_err("timing %pOF: %s is not a valid parent\n",
		       node, __clk_get_name(timing->parent));
		clk_put(timing->parent);
		return -EINVAL;
	}

	timing->parent_index = i;
	return 0;
}

static int cmp_timings(const void *_a, const void *_b)
{
	const struct emc_timing *a = _a;
	const struct emc_timing *b = _b;

	if (a->rate < b->rate)
		return -1;
	else if (a->rate == b->rate)
		return 0;
	else
		return 1;
}

static int load_timings_from_dt(struct tegra_clk_emc *tegra,
				struct device_node *node,
				u32 ram_code)
{
	struct emc_timing *timings_ptr;
	struct device_node *child;
	int child_count = of_get_child_count(node);
	int i = 0, err;
	size_t size;

	size = (tegra->num_timings + child_count) * sizeof(struct emc_timing);

	tegra->timings = krealloc(tegra->timings, size, GFP_KERNEL);
	if (!tegra->timings)
		return -ENOMEM;

	timings_ptr = tegra->timings + tegra->num_timings;
	tegra->num_timings += child_count;

	for_each_child_of_node(node, child) {
		struct emc_timing *timing = timings_ptr + (i++);

		err = load_one_timing_from_dt(tegra, timing, child);
		if (err) {
			of_node_put(child);
			return err;
		}

		timing->ram_code = ram_code;
	}

	sort(timings_ptr, child_count, sizeof(struct emc_timing),
	     cmp_timings, NULL);

	return 0;
}
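/*
 * Illustrative sketch of the device tree layout the loaders above expect.
 * The property names are the ones read by load_one_timing_from_dt() and
 * tegra_clk_register_emc(); the node names and values are only examples:
 *
 *	emc-timings-3 {
 *		nvidia,ram-code = <3>;
 *
 *		timing-12750000 {
 *			clock-frequency = <12750000>;
 *			nvidia,parent-clock-frequency = <408000000>;
 *			clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
 *			clock-names = "emc-parent";
 *		};
 *	};
 */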
static const struct clk_ops tegra_clk_emc_ops = {
	.recalc_rate = emc_recalc_rate,
	.determine_rate = emc_determine_rate,
	.set_rate = emc_set_rate,
	.get_parent = emc_get_parent,
};

struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
				   spinlock_t *lock)
{
	struct tegra_clk_emc *tegra;
	struct clk_init_data init;
	struct device_node *node;
	u32 node_ram_code;
	struct clk *clk;
	int err;

	tegra = kcalloc(1, sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return ERR_PTR(-ENOMEM);

	tegra->clk_regs = base;
	tegra->lock = lock;

	tegra->num_timings = 0;

	for_each_child_of_node(np, node) {
		err = of_property_read_u32(node, "nvidia,ram-code",
					   &node_ram_code);
		if (err)
			continue;

		/*
		 * Store timings for all ram codes as we cannot read the
		 * fuses until the apbmisc driver is loaded.
		 */
		err = load_timings_from_dt(tegra, node, node_ram_code);
		if (err) {
			of_node_put(node);
			return ERR_PTR(err);
		}
	}

	if (tegra->num_timings == 0)
		pr_warn("%s: no memory timings registered\n", __func__);

	tegra->emc_node = of_parse_phandle(np,
			"nvidia,external-memory-controller", 0);
	if (!tegra->emc_node)
		pr_warn("%s: couldn't find node for EMC driver\n", __func__);

	init.name = "emc";
	init.ops = &tegra_clk_emc_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = emc_parent_clk_names;
	init.num_parents = ARRAY_SIZE(emc_parent_clk_names);

	tegra->hw.init = &init;

	clk = clk_register(NULL, &tegra->hw);
	if (IS_ERR(clk))
		return clk;

	tegra->prev_parent = clk_hw_get_parent_by_index(
		&tegra->hw, emc_get_parent(&tegra->hw))->clk;
	tegra->changing_timing = false;

	/* Allow debugging tools to see the EMC clock */
	clk_register_clkdev(clk, "emc", "tegra-clk-debug");

	return clk;
}
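/*
 * Usage sketch (illustrative, not taken from this file): a consumer that
 * holds a reference to the "emc" clock changes the external memory
 * frequency through the regular CCF API, e.g.
 *
 *	emc_clk = devm_clk_get(dev, "emc");
 *	if (!IS_ERR(emc_clk))
 *		clk_set_rate(emc_clk, 204000000);
 *
 * The requested rate is rounded by emc_determine_rate() to one of the rates
 * present in the DT timing tables and then applied by emc_set_rate() above.
 */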