1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright 2013 Freescale Semiconductor, Inc. 4 * 5 * clock driver for Freescale QorIQ SoCs. 6 */ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10 #include <linux/clk.h> 11 #include <linux/clk-provider.h> 12 #include <linux/clkdev.h> 13 #include <linux/fsl/guts.h> 14 #include <linux/io.h> 15 #include <linux/kernel.h> 16 #include <linux/module.h> 17 #include <linux/of_address.h> 18 #include <linux/of_platform.h> 19 #include <linux/of.h> 20 #include <linux/slab.h> 21 22 #define PLL_DIV1 0 23 #define PLL_DIV2 1 24 #define PLL_DIV3 2 25 #define PLL_DIV4 3 26 27 #define PLATFORM_PLL 0 28 #define CGA_PLL1 1 29 #define CGA_PLL2 2 30 #define CGA_PLL3 3 31 #define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */ 32 #define CGB_PLL1 4 33 #define CGB_PLL2 5 34 #define MAX_PLL_DIV 16 35 36 struct clockgen_pll_div { 37 struct clk *clk; 38 char name[32]; 39 }; 40 41 struct clockgen_pll { 42 struct clockgen_pll_div div[MAX_PLL_DIV]; 43 }; 44 45 #define CLKSEL_VALID 1 46 #define CLKSEL_80PCT 2 /* Only allowed if PLL <= 80% of max cpu freq */ 47 48 struct clockgen_sourceinfo { 49 u32 flags; /* CLKSEL_xxx */ 50 int pll; /* CGx_PLLn */ 51 int div; /* PLL_DIVn */ 52 }; 53 54 #define NUM_MUX_PARENTS 16 55 56 struct clockgen_muxinfo { 57 struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS]; 58 }; 59 60 #define NUM_HWACCEL 5 61 #define NUM_CMUX 8 62 63 struct clockgen; 64 65 /* 66 * cmux freq must be >= platform pll. 67 * If not set, cmux freq must be >= platform pll/2 68 */ 69 #define CG_CMUX_GE_PLAT 1 70 71 #define CG_PLL_8BIT 2 /* PLLCnGSR[CFG] is 8 bits, not 6 */ 72 #define CG_VER3 4 /* version 3 cg: reg layout different */ 73 #define CG_LITTLE_ENDIAN 8 74 75 struct clockgen_chipinfo { 76 const char *compat, *guts_compat; 77 const struct clockgen_muxinfo *cmux_groups[2]; 78 const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL]; 79 void (*init_periph)(struct clockgen *cg); 80 int cmux_to_group[NUM_CMUX + 1]; /* array should be -1 terminated */ 81 u32 pll_mask; /* 1 << n bit set if PLL n is valid */ 82 u32 flags; /* CG_xxx */ 83 }; 84 85 struct clockgen { 86 struct device_node *node; 87 void __iomem *regs; 88 struct clockgen_chipinfo info; /* mutable copy */ 89 struct clk *sysclk, *coreclk; 90 struct clockgen_pll pll[6]; 91 struct clk *cmux[NUM_CMUX]; 92 struct clk *hwaccel[NUM_HWACCEL]; 93 struct clk *fman[2]; 94 struct ccsr_guts __iomem *guts; 95 }; 96 97 static struct clockgen clockgen; 98 99 static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg) 100 { 101 if (cg->info.flags & CG_LITTLE_ENDIAN) 102 iowrite32(val, reg); 103 else 104 iowrite32be(val, reg); 105 } 106 107 static u32 cg_in(struct clockgen *cg, u32 __iomem *reg) 108 { 109 u32 val; 110 111 if (cg->info.flags & CG_LITTLE_ENDIAN) 112 val = ioread32(reg); 113 else 114 val = ioread32be(reg); 115 116 return val; 117 } 118 119 static const struct clockgen_muxinfo p2041_cmux_grp1 = { 120 { 121 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 122 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 123 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 124 } 125 }; 126 127 static const struct clockgen_muxinfo p2041_cmux_grp2 = { 128 { 129 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 130 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 131 [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 132 } 133 }; 134 135 static const struct clockgen_muxinfo p5020_cmux_grp1 = { 136 { 137 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 138 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 139 [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 }, 140 } 141 }; 142 143 static const struct clockgen_muxinfo p5020_cmux_grp2 = { 144 { 145 [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, 146 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 147 [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 148 } 149 }; 150 151 static const struct clockgen_muxinfo p5040_cmux_grp1 = { 152 { 153 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 154 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 155 [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 }, 156 [5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 }, 157 } 158 }; 159 160 static const struct clockgen_muxinfo p5040_cmux_grp2 = { 161 { 162 [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, 163 [1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 }, 164 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 165 [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 166 } 167 }; 168 169 static const struct clockgen_muxinfo p4080_cmux_grp1 = { 170 { 171 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 172 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 173 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 174 [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 175 [8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 }, 176 } 177 }; 178 179 static const struct clockgen_muxinfo p4080_cmux_grp2 = { 180 { 181 [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, 182 [8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 }, 183 [9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 }, 184 [12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 }, 185 [13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 }, 186 } 187 }; 188 189 static const struct clockgen_muxinfo t1023_cmux = { 190 { 191 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 192 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 193 } 194 }; 195 196 static const struct clockgen_muxinfo t1040_cmux = { 197 { 198 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 199 [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 200 [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 201 [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 202 } 203 }; 204 205 206 static const struct clockgen_muxinfo clockgen2_cmux_cga = { 207 { 208 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 209 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 210 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, 211 {}, 212 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 213 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 214 { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, 215 {}, 216 { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 }, 217 { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 }, 218 { CLKSEL_VALID, CGA_PLL3, PLL_DIV4 }, 219 }, 220 }; 221 222 static const struct clockgen_muxinfo clockgen2_cmux_cga12 = { 223 { 224 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 225 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 226 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, 227 {}, 228 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 229 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 230 { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, 231 }, 232 }; 233 234 static const struct clockgen_muxinfo clockgen2_cmux_cgb = { 235 { 236 { CLKSEL_VALID, CGB_PLL1, PLL_DIV1 }, 237 { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, 238 { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 }, 239 {}, 240 { CLKSEL_VALID, CGB_PLL2, PLL_DIV1 }, 241 { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, 242 { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 }, 243 }, 244 }; 245 246 static const struct clockgen_muxinfo ls1028a_hwa1 = { 247 { 248 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, 249 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 250 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 251 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 252 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, 253 {}, 254 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 255 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 256 }, 257 }; 258 259 static const struct clockgen_muxinfo ls1028a_hwa2 = { 260 { 261 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, 262 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 263 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 264 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 265 { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, 266 {}, 267 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 268 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 269 }, 270 }; 271 272 static const struct clockgen_muxinfo ls1028a_hwa3 = { 273 { 274 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, 275 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 276 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 277 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 278 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, 279 {}, 280 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 281 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 282 }, 283 }; 284 285 static const struct clockgen_muxinfo ls1028a_hwa4 = { 286 { 287 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, 288 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 289 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 290 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 291 { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, 292 {}, 293 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 294 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 295 }, 296 }; 297 298 static const struct clockgen_muxinfo ls1043a_hwa1 = { 299 { 300 {}, 301 {}, 302 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 303 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 304 {}, 305 {}, 306 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 307 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 308 }, 309 }; 310 311 static const struct clockgen_muxinfo ls1043a_hwa2 = { 312 { 313 {}, 314 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 315 {}, 316 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 317 }, 318 }; 319 320 static const struct clockgen_muxinfo ls1046a_hwa1 = { 321 { 322 {}, 323 {}, 324 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 325 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 326 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, 327 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, 328 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 329 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 330 }, 331 }; 332 333 static const struct clockgen_muxinfo ls1046a_hwa2 = { 334 { 335 {}, 336 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 337 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 338 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 339 {}, 340 {}, 341 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 342 }, 343 }; 344 345 static const struct clockgen_muxinfo ls1088a_hwa1 = { 346 { 347 {}, 348 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 349 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 350 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 351 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, 352 {}, 353 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 354 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 355 }, 356 }; 357 358 static const struct clockgen_muxinfo ls1088a_hwa2 = { 359 { 360 {}, 361 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 362 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 363 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 364 { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, 365 {}, 366 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 367 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 368 }, 369 }; 370 371 static const struct clockgen_muxinfo ls1012a_cmux = { 372 { 373 [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 374 {}, 375 [2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 376 } 377 }; 378 379 static const struct clockgen_muxinfo t1023_hwa1 = { 380 { 381 {}, 382 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 383 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 384 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 385 }, 386 }; 387 388 static const struct clockgen_muxinfo t1023_hwa2 = { 389 { 390 [6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 391 }, 392 }; 393 394 static const struct clockgen_muxinfo t2080_hwa1 = { 395 { 396 {}, 397 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 398 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 399 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 400 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, 401 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, 402 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 403 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 404 }, 405 }; 406 407 static const struct clockgen_muxinfo t2080_hwa2 = { 408 { 409 {}, 410 { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, 411 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 412 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 413 { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, 414 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, 415 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 416 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 417 }, 418 }; 419 420 static const struct clockgen_muxinfo t4240_hwa1 = { 421 { 422 { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 }, 423 { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, 424 { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, 425 { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, 426 { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, 427 {}, 428 { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, 429 { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, 430 }, 431 }; 432 433 static const struct clockgen_muxinfo t4240_hwa4 = { 434 { 435 [2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, 436 [3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 }, 437 [4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 }, 438 [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, 439 [6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, 440 }, 441 }; 442 443 static const struct clockgen_muxinfo t4240_hwa5 = { 444 { 445 [2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, 446 [3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 }, 447 [4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 }, 448 [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, 449 [6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, 450 [7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 }, 451 }, 452 }; 453 454 #define RCWSR7_FM1_CLK_SEL 0x40000000 455 #define RCWSR7_FM2_CLK_SEL 0x20000000 456 #define RCWSR7_HWA_ASYNC_DIV 0x04000000 457 458 static void __init p2041_init_periph(struct clockgen *cg) 459 { 460 u32 reg; 461 462 reg = ioread32be(&cg->guts->rcwsr[7]); 463 464 if (reg & RCWSR7_FM1_CLK_SEL) 465 cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk; 466 else 467 cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; 468 } 469 470 static void __init p4080_init_periph(struct clockgen *cg) 471 { 472 u32 reg; 473 474 reg = ioread32be(&cg->guts->rcwsr[7]); 475 476 if (reg & RCWSR7_FM1_CLK_SEL) 477 cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk; 478 else 479 cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; 480 481 if (reg & RCWSR7_FM2_CLK_SEL) 482 cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk; 483 else 484 cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; 485 } 486 487 static void __init p5020_init_periph(struct clockgen *cg) 488 { 489 u32 reg; 490 int div = PLL_DIV2; 491 492 reg = ioread32be(&cg->guts->rcwsr[7]); 493 if (reg & RCWSR7_HWA_ASYNC_DIV) 494 div = PLL_DIV4; 495 496 if (reg & RCWSR7_FM1_CLK_SEL) 497 cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk; 498 else 499 cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; 500 } 501 502 static void __init p5040_init_periph(struct clockgen *cg) 503 { 504 u32 reg; 505 int div = PLL_DIV2; 506 507 reg = ioread32be(&cg->guts->rcwsr[7]); 508 if (reg & RCWSR7_HWA_ASYNC_DIV) 509 div = PLL_DIV4; 510 511 if (reg & RCWSR7_FM1_CLK_SEL) 512 cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk; 513 else 514 cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; 515 516 if (reg & RCWSR7_FM2_CLK_SEL) 517 cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk; 518 else 519 cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; 520 } 521 522 static void __init t1023_init_periph(struct clockgen *cg) 523 { 524 cg->fman[0] = cg->hwaccel[1]; 525 } 526 527 static void __init t1040_init_periph(struct clockgen *cg) 528 { 529 cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk; 530 } 531 532 static void __init t2080_init_periph(struct clockgen *cg) 533 { 534 cg->fman[0] = cg->hwaccel[0]; 535 } 536 537 static void __init t4240_init_periph(struct clockgen *cg) 538 { 539 cg->fman[0] = cg->hwaccel[3]; 540 cg->fman[1] = cg->hwaccel[4]; 541 } 542 543 static const struct clockgen_chipinfo chipinfo[] = { 544 { 545 .compat = "fsl,b4420-clockgen", 546 .guts_compat = "fsl,b4860-device-config", 547 .init_periph = t2080_init_periph, 548 .cmux_groups = { 549 &clockgen2_cmux_cga12, &clockgen2_cmux_cgb 550 }, 551 .hwaccel = { 552 &t2080_hwa1 553 }, 554 .cmux_to_group = { 555 0, 1, 1, 1, -1 556 }, 557 .pll_mask = 0x3f, 558 .flags = CG_PLL_8BIT, 559 }, 560 { 561 .compat = "fsl,b4860-clockgen", 562 .guts_compat = "fsl,b4860-device-config", 563 .init_periph = t2080_init_periph, 564 .cmux_groups = { 565 &clockgen2_cmux_cga12, &clockgen2_cmux_cgb 566 }, 567 .hwaccel = { 568 &t2080_hwa1 569 }, 570 .cmux_to_group = { 571 0, 1, 1, 1, -1 572 }, 573 .pll_mask = 0x3f, 574 .flags = CG_PLL_8BIT, 575 }, 576 { 577 .compat = "fsl,ls1021a-clockgen", 578 .cmux_groups = { 579 &t1023_cmux 580 }, 581 .cmux_to_group = { 582 0, -1 583 }, 584 .pll_mask = 0x03, 585 }, 586 { 587 .compat = "fsl,ls1028a-clockgen", 588 .cmux_groups = { 589 &clockgen2_cmux_cga12 590 }, 591 .hwaccel = { 592 &ls1028a_hwa1, &ls1028a_hwa2, 593 &ls1028a_hwa3, &ls1028a_hwa4 594 }, 595 .cmux_to_group = { 596 0, 0, 0, 0, -1 597 }, 598 .pll_mask = 0x07, 599 .flags = CG_VER3 | CG_LITTLE_ENDIAN, 600 }, 601 { 602 .compat = "fsl,ls1043a-clockgen", 603 .init_periph = t2080_init_periph, 604 .cmux_groups = { 605 &t1040_cmux 606 }, 607 .hwaccel = { 608 &ls1043a_hwa1, &ls1043a_hwa2 609 }, 610 .cmux_to_group = { 611 0, -1 612 }, 613 .pll_mask = 0x07, 614 .flags = CG_PLL_8BIT, 615 }, 616 { 617 .compat = "fsl,ls1046a-clockgen", 618 .init_periph = t2080_init_periph, 619 .cmux_groups = { 620 &t1040_cmux 621 }, 622 .hwaccel = { 623 &ls1046a_hwa1, &ls1046a_hwa2 624 }, 625 .cmux_to_group = { 626 0, -1 627 }, 628 .pll_mask = 0x07, 629 .flags = CG_PLL_8BIT, 630 }, 631 { 632 .compat = "fsl,ls1088a-clockgen", 633 .cmux_groups = { 634 &clockgen2_cmux_cga12 635 }, 636 .hwaccel = { 637 &ls1088a_hwa1, &ls1088a_hwa2 638 }, 639 .cmux_to_group = { 640 0, 0, -1 641 }, 642 .pll_mask = 0x07, 643 .flags = CG_VER3 | CG_LITTLE_ENDIAN, 644 }, 645 { 646 .compat = "fsl,ls1012a-clockgen", 647 .cmux_groups = { 648 &ls1012a_cmux 649 }, 650 .cmux_to_group = { 651 0, -1 652 }, 653 .pll_mask = 0x03, 654 }, 655 { 656 .compat = "fsl,ls2080a-clockgen", 657 .cmux_groups = { 658 &clockgen2_cmux_cga12, &clockgen2_cmux_cgb 659 }, 660 .cmux_to_group = { 661 0, 0, 1, 1, -1 662 }, 663 .pll_mask = 0x37, 664 .flags = CG_VER3 | CG_LITTLE_ENDIAN, 665 }, 666 { 667 .compat = "fsl,lx2160a-clockgen", 668 .cmux_groups = { 669 &clockgen2_cmux_cga12, &clockgen2_cmux_cgb 670 }, 671 .cmux_to_group = { 672 0, 0, 0, 0, 1, 1, 1, 1, -1 673 }, 674 .pll_mask = 0x37, 675 .flags = CG_VER3 | CG_LITTLE_ENDIAN, 676 }, 677 { 678 .compat = "fsl,p2041-clockgen", 679 .guts_compat = "fsl,qoriq-device-config-1.0", 680 .init_periph = p2041_init_periph, 681 .cmux_groups = { 682 &p2041_cmux_grp1, &p2041_cmux_grp2 683 }, 684 .cmux_to_group = { 685 0, 0, 1, 1, -1 686 }, 687 .pll_mask = 0x07, 688 }, 689 { 690 .compat = "fsl,p3041-clockgen", 691 .guts_compat = "fsl,qoriq-device-config-1.0", 692 .init_periph = p2041_init_periph, 693 .cmux_groups = { 694 &p2041_cmux_grp1, &p2041_cmux_grp2 695 }, 696 .cmux_to_group = { 697 0, 0, 1, 1, -1 698 }, 699 .pll_mask = 0x07, 700 }, 701 { 702 .compat = "fsl,p4080-clockgen", 703 .guts_compat = "fsl,qoriq-device-config-1.0", 704 .init_periph = p4080_init_periph, 705 .cmux_groups = { 706 &p4080_cmux_grp1, &p4080_cmux_grp2 707 }, 708 .cmux_to_group = { 709 0, 0, 0, 0, 1, 1, 1, 1, -1 710 }, 711 .pll_mask = 0x1f, 712 }, 713 { 714 .compat = "fsl,p5020-clockgen", 715 .guts_compat = "fsl,qoriq-device-config-1.0", 716 .init_periph = p5020_init_periph, 717 .cmux_groups = { 718 &p5020_cmux_grp1, &p5020_cmux_grp2 719 }, 720 .cmux_to_group = { 721 0, 1, -1 722 }, 723 .pll_mask = 0x07, 724 }, 725 { 726 .compat = "fsl,p5040-clockgen", 727 .guts_compat = "fsl,p5040-device-config", 728 .init_periph = p5040_init_periph, 729 .cmux_groups = { 730 &p5040_cmux_grp1, &p5040_cmux_grp2 731 }, 732 .cmux_to_group = { 733 0, 0, 1, 1, -1 734 }, 735 .pll_mask = 0x0f, 736 }, 737 { 738 .compat = "fsl,t1023-clockgen", 739 .guts_compat = "fsl,t1023-device-config", 740 .init_periph = t1023_init_periph, 741 .cmux_groups = { 742 &t1023_cmux 743 }, 744 .hwaccel = { 745 &t1023_hwa1, &t1023_hwa2 746 }, 747 .cmux_to_group = { 748 0, 0, -1 749 }, 750 .pll_mask = 0x03, 751 .flags = CG_PLL_8BIT, 752 }, 753 { 754 .compat = "fsl,t1040-clockgen", 755 .guts_compat = "fsl,t1040-device-config", 756 .init_periph = t1040_init_periph, 757 .cmux_groups = { 758 &t1040_cmux 759 }, 760 .cmux_to_group = { 761 0, 0, 0, 0, -1 762 }, 763 .pll_mask = 0x07, 764 .flags = CG_PLL_8BIT, 765 }, 766 { 767 .compat = "fsl,t2080-clockgen", 768 .guts_compat = "fsl,t2080-device-config", 769 .init_periph = t2080_init_periph, 770 .cmux_groups = { 771 &clockgen2_cmux_cga12 772 }, 773 .hwaccel = { 774 &t2080_hwa1, &t2080_hwa2 775 }, 776 .cmux_to_group = { 777 0, -1 778 }, 779 .pll_mask = 0x07, 780 .flags = CG_PLL_8BIT, 781 }, 782 { 783 .compat = "fsl,t4240-clockgen", 784 .guts_compat = "fsl,t4240-device-config", 785 .init_periph = t4240_init_periph, 786 .cmux_groups = { 787 &clockgen2_cmux_cga, &clockgen2_cmux_cgb 788 }, 789 .hwaccel = { 790 &t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5 791 }, 792 .cmux_to_group = { 793 0, 0, 1, -1 794 }, 795 .pll_mask = 0x3f, 796 .flags = CG_PLL_8BIT, 797 }, 798 {}, 799 }; 800 801 struct mux_hwclock { 802 struct clk_hw hw; 803 struct clockgen *cg; 804 const struct clockgen_muxinfo *info; 805 u32 __iomem *reg; 806 u8 parent_to_clksel[NUM_MUX_PARENTS]; 807 s8 clksel_to_parent[NUM_MUX_PARENTS]; 808 int num_parents; 809 }; 810 811 #define to_mux_hwclock(p) container_of(p, struct mux_hwclock, hw) 812 #define CLKSEL_MASK 0x78000000 813 #define CLKSEL_SHIFT 27 814 815 static int mux_set_parent(struct clk_hw *hw, u8 idx) 816 { 817 struct mux_hwclock *hwc = to_mux_hwclock(hw); 818 u32 clksel; 819 820 if (idx >= hwc->num_parents) 821 return -EINVAL; 822 823 clksel = hwc->parent_to_clksel[idx]; 824 cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg); 825 826 return 0; 827 } 828 829 static u8 mux_get_parent(struct clk_hw *hw) 830 { 831 struct mux_hwclock *hwc = to_mux_hwclock(hw); 832 u32 clksel; 833 s8 ret; 834 835 clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; 836 837 ret = hwc->clksel_to_parent[clksel]; 838 if (ret < 0) { 839 pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg); 840 return 0; 841 } 842 843 return ret; 844 } 845 846 static const struct clk_ops cmux_ops = { 847 .get_parent = mux_get_parent, 848 .set_parent = mux_set_parent, 849 }; 850 851 /* 852 * Don't allow setting for now, as the clock options haven't been 853 * sanitized for additional restrictions. 854 */ 855 static const struct clk_ops hwaccel_ops = { 856 .get_parent = mux_get_parent, 857 }; 858 859 static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg, 860 struct mux_hwclock *hwc, 861 int idx) 862 { 863 int pll, div; 864 865 if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID)) 866 return NULL; 867 868 pll = hwc->info->clksel[idx].pll; 869 div = hwc->info->clksel[idx].div; 870 871 return &cg->pll[pll].div[div]; 872 } 873 874 static struct clk * __init create_mux_common(struct clockgen *cg, 875 struct mux_hwclock *hwc, 876 const struct clk_ops *ops, 877 unsigned long min_rate, 878 unsigned long max_rate, 879 unsigned long pct80_rate, 880 const char *fmt, int idx) 881 { 882 struct clk_init_data init = {}; 883 struct clk *clk; 884 const struct clockgen_pll_div *div; 885 const char *parent_names[NUM_MUX_PARENTS]; 886 char name[32]; 887 int i, j; 888 889 snprintf(name, sizeof(name), fmt, idx); 890 891 for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) { 892 unsigned long rate; 893 894 hwc->clksel_to_parent[i] = -1; 895 896 div = get_pll_div(cg, hwc, i); 897 if (!div) 898 continue; 899 900 rate = clk_get_rate(div->clk); 901 902 if (hwc->info->clksel[i].flags & CLKSEL_80PCT && 903 rate > pct80_rate) 904 continue; 905 if (rate < min_rate) 906 continue; 907 if (rate > max_rate) 908 continue; 909 910 parent_names[j] = div->name; 911 hwc->parent_to_clksel[j] = i; 912 hwc->clksel_to_parent[i] = j; 913 j++; 914 } 915 916 init.name = name; 917 init.ops = ops; 918 init.parent_names = parent_names; 919 init.num_parents = hwc->num_parents = j; 920 init.flags = 0; 921 hwc->hw.init = &init; 922 hwc->cg = cg; 923 924 clk = clk_register(NULL, &hwc->hw); 925 if (IS_ERR(clk)) { 926 pr_err("%s: Couldn't register %s: %ld\n", __func__, name, 927 PTR_ERR(clk)); 928 kfree(hwc); 929 return NULL; 930 } 931 932 return clk; 933 } 934 935 static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) 936 { 937 struct mux_hwclock *hwc; 938 const struct clockgen_pll_div *div; 939 unsigned long plat_rate, min_rate; 940 u64 max_rate, pct80_rate; 941 u32 clksel; 942 943 hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); 944 if (!hwc) 945 return NULL; 946 947 if (cg->info.flags & CG_VER3) 948 hwc->reg = cg->regs + 0x70000 + 0x20 * idx; 949 else 950 hwc->reg = cg->regs + 0x20 * idx; 951 952 hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]]; 953 954 /* 955 * Find the rate for the default clksel, and treat it as the 956 * maximum rated core frequency. If this is an incorrect 957 * assumption, certain clock options (possibly including the 958 * default clksel) may be inappropriately excluded on certain 959 * chips. 960 */ 961 clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; 962 div = get_pll_div(cg, hwc, clksel); 963 if (!div) { 964 kfree(hwc); 965 return NULL; 966 } 967 968 max_rate = clk_get_rate(div->clk); 969 pct80_rate = max_rate * 8; 970 do_div(pct80_rate, 10); 971 972 plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); 973 974 if (cg->info.flags & CG_CMUX_GE_PLAT) 975 min_rate = plat_rate; 976 else 977 min_rate = plat_rate / 2; 978 979 return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate, 980 pct80_rate, "cg-cmux%d", idx); 981 } 982 983 static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx) 984 { 985 struct mux_hwclock *hwc; 986 987 hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); 988 if (!hwc) 989 return NULL; 990 991 hwc->reg = cg->regs + 0x20 * idx + 0x10; 992 hwc->info = cg->info.hwaccel[idx]; 993 994 return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0, 995 "cg-hwaccel%d", idx); 996 } 997 998 static void __init create_muxes(struct clockgen *cg) 999 { 1000 int i; 1001 1002 for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) { 1003 if (cg->info.cmux_to_group[i] < 0) 1004 break; 1005 if (cg->info.cmux_to_group[i] >= 1006 ARRAY_SIZE(cg->info.cmux_groups)) { 1007 WARN_ON_ONCE(1); 1008 continue; 1009 } 1010 1011 cg->cmux[i] = create_one_cmux(cg, i); 1012 } 1013 1014 for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) { 1015 if (!cg->info.hwaccel[i]) 1016 continue; 1017 1018 cg->hwaccel[i] = create_one_hwaccel(cg, i); 1019 } 1020 } 1021 1022 static void __init clockgen_init(struct device_node *np); 1023 1024 /* 1025 * Legacy nodes may get probed before the parent clockgen node. 1026 * It is assumed that device trees with legacy nodes will not 1027 * contain a "clocks" property -- otherwise the input clocks may 1028 * not be initialized at this point. 1029 */ 1030 static void __init legacy_init_clockgen(struct device_node *np) 1031 { 1032 if (!clockgen.node) 1033 clockgen_init(of_get_parent(np)); 1034 } 1035 1036 /* Legacy node */ 1037 static void __init core_mux_init(struct device_node *np) 1038 { 1039 struct clk *clk; 1040 struct resource res; 1041 int idx, rc; 1042 1043 legacy_init_clockgen(np); 1044 1045 if (of_address_to_resource(np, 0, &res)) 1046 return; 1047 1048 idx = (res.start & 0xf0) >> 5; 1049 clk = clockgen.cmux[idx]; 1050 1051 rc = of_clk_add_provider(np, of_clk_src_simple_get, clk); 1052 if (rc) { 1053 pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n", 1054 __func__, np, rc); 1055 return; 1056 } 1057 } 1058 1059 static struct clk __init 1060 *sysclk_from_fixed(struct device_node *node, const char *name) 1061 { 1062 u32 rate; 1063 1064 if (of_property_read_u32(node, "clock-frequency", &rate)) 1065 return ERR_PTR(-ENODEV); 1066 1067 return clk_register_fixed_rate(NULL, name, NULL, 0, rate); 1068 } 1069 1070 static struct clk __init *input_clock(const char *name, struct clk *clk) 1071 { 1072 const char *input_name; 1073 1074 /* Register the input clock under the desired name. */ 1075 input_name = __clk_get_name(clk); 1076 clk = clk_register_fixed_factor(NULL, name, input_name, 1077 0, 1, 1); 1078 if (IS_ERR(clk)) 1079 pr_err("%s: Couldn't register %s: %ld\n", __func__, name, 1080 PTR_ERR(clk)); 1081 1082 return clk; 1083 } 1084 1085 static struct clk __init *input_clock_by_name(const char *name, 1086 const char *dtname) 1087 { 1088 struct clk *clk; 1089 1090 clk = of_clk_get_by_name(clockgen.node, dtname); 1091 if (IS_ERR(clk)) 1092 return clk; 1093 1094 return input_clock(name, clk); 1095 } 1096 1097 static struct clk __init *input_clock_by_index(const char *name, int idx) 1098 { 1099 struct clk *clk; 1100 1101 clk = of_clk_get(clockgen.node, 0); 1102 if (IS_ERR(clk)) 1103 return clk; 1104 1105 return input_clock(name, clk); 1106 } 1107 1108 static struct clk * __init create_sysclk(const char *name) 1109 { 1110 struct device_node *sysclk; 1111 struct clk *clk; 1112 1113 clk = sysclk_from_fixed(clockgen.node, name); 1114 if (!IS_ERR(clk)) 1115 return clk; 1116 1117 clk = input_clock_by_name(name, "sysclk"); 1118 if (!IS_ERR(clk)) 1119 return clk; 1120 1121 clk = input_clock_by_index(name, 0); 1122 if (!IS_ERR(clk)) 1123 return clk; 1124 1125 sysclk = of_get_child_by_name(clockgen.node, "sysclk"); 1126 if (sysclk) { 1127 clk = sysclk_from_fixed(sysclk, name); 1128 if (!IS_ERR(clk)) 1129 return clk; 1130 } 1131 1132 pr_err("%s: No input sysclk\n", __func__); 1133 return NULL; 1134 } 1135 1136 static struct clk * __init create_coreclk(const char *name) 1137 { 1138 struct clk *clk; 1139 1140 clk = input_clock_by_name(name, "coreclk"); 1141 if (!IS_ERR(clk)) 1142 return clk; 1143 1144 /* 1145 * This indicates a mix of legacy nodes with the new coreclk 1146 * mechanism, which should never happen. If this error occurs, 1147 * don't use the wrong input clock just because coreclk isn't 1148 * ready yet. 1149 */ 1150 if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER)) 1151 return clk; 1152 1153 return NULL; 1154 } 1155 1156 /* Legacy node */ 1157 static void __init sysclk_init(struct device_node *node) 1158 { 1159 struct clk *clk; 1160 1161 legacy_init_clockgen(node); 1162 1163 clk = clockgen.sysclk; 1164 if (clk) 1165 of_clk_add_provider(node, of_clk_src_simple_get, clk); 1166 } 1167 1168 #define PLL_KILL BIT(31) 1169 1170 static void __init create_one_pll(struct clockgen *cg, int idx) 1171 { 1172 u32 __iomem *reg; 1173 u32 mult; 1174 struct clockgen_pll *pll = &cg->pll[idx]; 1175 const char *input = "cg-sysclk"; 1176 int i; 1177 1178 if (!(cg->info.pll_mask & (1 << idx))) 1179 return; 1180 1181 if (cg->coreclk && idx != PLATFORM_PLL) { 1182 if (IS_ERR(cg->coreclk)) 1183 return; 1184 1185 input = "cg-coreclk"; 1186 } 1187 1188 if (cg->info.flags & CG_VER3) { 1189 switch (idx) { 1190 case PLATFORM_PLL: 1191 reg = cg->regs + 0x60080; 1192 break; 1193 case CGA_PLL1: 1194 reg = cg->regs + 0x80; 1195 break; 1196 case CGA_PLL2: 1197 reg = cg->regs + 0xa0; 1198 break; 1199 case CGB_PLL1: 1200 reg = cg->regs + 0x10080; 1201 break; 1202 case CGB_PLL2: 1203 reg = cg->regs + 0x100a0; 1204 break; 1205 default: 1206 WARN_ONCE(1, "index %d\n", idx); 1207 return; 1208 } 1209 } else { 1210 if (idx == PLATFORM_PLL) 1211 reg = cg->regs + 0xc00; 1212 else 1213 reg = cg->regs + 0x800 + 0x20 * (idx - 1); 1214 } 1215 1216 /* Get the multiple of PLL */ 1217 mult = cg_in(cg, reg); 1218 1219 /* Check if this PLL is disabled */ 1220 if (mult & PLL_KILL) { 1221 pr_debug("%s(): pll %p disabled\n", __func__, reg); 1222 return; 1223 } 1224 1225 if ((cg->info.flags & CG_VER3) || 1226 ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL)) 1227 mult = (mult & GENMASK(8, 1)) >> 1; 1228 else 1229 mult = (mult & GENMASK(6, 1)) >> 1; 1230 1231 for (i = 0; i < ARRAY_SIZE(pll->div); i++) { 1232 struct clk *clk; 1233 int ret; 1234 1235 /* 1236 * For platform PLL, there are MAX_PLL_DIV divider clocks. 1237 * For core PLL, there are 4 divider clocks at most. 1238 */ 1239 if (idx != PLATFORM_PLL && i >= 4) 1240 break; 1241 1242 snprintf(pll->div[i].name, sizeof(pll->div[i].name), 1243 "cg-pll%d-div%d", idx, i + 1); 1244 1245 clk = clk_register_fixed_factor(NULL, 1246 pll->div[i].name, input, 0, mult, i + 1); 1247 if (IS_ERR(clk)) { 1248 pr_err("%s: %s: register failed %ld\n", 1249 __func__, pll->div[i].name, PTR_ERR(clk)); 1250 continue; 1251 } 1252 1253 pll->div[i].clk = clk; 1254 ret = clk_register_clkdev(clk, pll->div[i].name, NULL); 1255 if (ret != 0) 1256 pr_err("%s: %s: register to lookup table failed %d\n", 1257 __func__, pll->div[i].name, ret); 1258 1259 } 1260 } 1261 1262 static void __init create_plls(struct clockgen *cg) 1263 { 1264 int i; 1265 1266 for (i = 0; i < ARRAY_SIZE(cg->pll); i++) 1267 create_one_pll(cg, i); 1268 } 1269 1270 static void __init legacy_pll_init(struct device_node *np, int idx) 1271 { 1272 struct clockgen_pll *pll; 1273 struct clk_onecell_data *onecell_data; 1274 struct clk **subclks; 1275 int count, rc; 1276 1277 legacy_init_clockgen(np); 1278 1279 pll = &clockgen.pll[idx]; 1280 count = of_property_count_strings(np, "clock-output-names"); 1281 1282 BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4); 1283 subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL); 1284 if (!subclks) 1285 return; 1286 1287 onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL); 1288 if (!onecell_data) 1289 goto err_clks; 1290 1291 if (count <= 3) { 1292 subclks[0] = pll->div[0].clk; 1293 subclks[1] = pll->div[1].clk; 1294 subclks[2] = pll->div[3].clk; 1295 } else { 1296 subclks[0] = pll->div[0].clk; 1297 subclks[1] = pll->div[1].clk; 1298 subclks[2] = pll->div[2].clk; 1299 subclks[3] = pll->div[3].clk; 1300 } 1301 1302 onecell_data->clks = subclks; 1303 onecell_data->clk_num = count; 1304 1305 rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data); 1306 if (rc) { 1307 pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n", 1308 __func__, np, rc); 1309 goto err_cell; 1310 } 1311 1312 return; 1313 err_cell: 1314 kfree(onecell_data); 1315 err_clks: 1316 kfree(subclks); 1317 } 1318 1319 /* Legacy node */ 1320 static void __init pltfrm_pll_init(struct device_node *np) 1321 { 1322 legacy_pll_init(np, PLATFORM_PLL); 1323 } 1324 1325 /* Legacy node */ 1326 static void __init core_pll_init(struct device_node *np) 1327 { 1328 struct resource res; 1329 int idx; 1330 1331 if (of_address_to_resource(np, 0, &res)) 1332 return; 1333 1334 if ((res.start & 0xfff) == 0xc00) { 1335 /* 1336 * ls1021a devtree labels the platform PLL 1337 * with the core PLL compatible 1338 */ 1339 pltfrm_pll_init(np); 1340 } else { 1341 idx = (res.start & 0xf0) >> 5; 1342 legacy_pll_init(np, CGA_PLL1 + idx); 1343 } 1344 } 1345 1346 static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data) 1347 { 1348 struct clockgen *cg = data; 1349 struct clk *clk; 1350 struct clockgen_pll *pll; 1351 u32 type, idx; 1352 1353 if (clkspec->args_count < 2) { 1354 pr_err("%s: insufficient phandle args\n", __func__); 1355 return ERR_PTR(-EINVAL); 1356 } 1357 1358 type = clkspec->args[0]; 1359 idx = clkspec->args[1]; 1360 1361 switch (type) { 1362 case 0: 1363 if (idx != 0) 1364 goto bad_args; 1365 clk = cg->sysclk; 1366 break; 1367 case 1: 1368 if (idx >= ARRAY_SIZE(cg->cmux)) 1369 goto bad_args; 1370 clk = cg->cmux[idx]; 1371 break; 1372 case 2: 1373 if (idx >= ARRAY_SIZE(cg->hwaccel)) 1374 goto bad_args; 1375 clk = cg->hwaccel[idx]; 1376 break; 1377 case 3: 1378 if (idx >= ARRAY_SIZE(cg->fman)) 1379 goto bad_args; 1380 clk = cg->fman[idx]; 1381 break; 1382 case 4: 1383 pll = &cg->pll[PLATFORM_PLL]; 1384 if (idx >= ARRAY_SIZE(pll->div)) 1385 goto bad_args; 1386 clk = pll->div[idx].clk; 1387 break; 1388 case 5: 1389 if (idx != 0) 1390 goto bad_args; 1391 clk = cg->coreclk; 1392 if (IS_ERR(clk)) 1393 clk = NULL; 1394 break; 1395 default: 1396 goto bad_args; 1397 } 1398 1399 if (!clk) 1400 return ERR_PTR(-ENOENT); 1401 return clk; 1402 1403 bad_args: 1404 pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx); 1405 return ERR_PTR(-EINVAL); 1406 } 1407 1408 #ifdef CONFIG_PPC 1409 #include <asm/mpc85xx.h> 1410 1411 static const u32 a4510_svrs[] __initconst = { 1412 (SVR_P2040 << 8) | 0x10, /* P2040 1.0 */ 1413 (SVR_P2040 << 8) | 0x11, /* P2040 1.1 */ 1414 (SVR_P2041 << 8) | 0x10, /* P2041 1.0 */ 1415 (SVR_P2041 << 8) | 0x11, /* P2041 1.1 */ 1416 (SVR_P3041 << 8) | 0x10, /* P3041 1.0 */ 1417 (SVR_P3041 << 8) | 0x11, /* P3041 1.1 */ 1418 (SVR_P4040 << 8) | 0x20, /* P4040 2.0 */ 1419 (SVR_P4080 << 8) | 0x20, /* P4080 2.0 */ 1420 (SVR_P5010 << 8) | 0x10, /* P5010 1.0 */ 1421 (SVR_P5010 << 8) | 0x20, /* P5010 2.0 */ 1422 (SVR_P5020 << 8) | 0x10, /* P5020 1.0 */ 1423 (SVR_P5021 << 8) | 0x10, /* P5021 1.0 */ 1424 (SVR_P5040 << 8) | 0x10, /* P5040 1.0 */ 1425 }; 1426 1427 #define SVR_SECURITY 0x80000 /* The Security (E) bit */ 1428 1429 static bool __init has_erratum_a4510(void) 1430 { 1431 u32 svr = mfspr(SPRN_SVR); 1432 int i; 1433 1434 svr &= ~SVR_SECURITY; 1435 1436 for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) { 1437 if (svr == a4510_svrs[i]) 1438 return true; 1439 } 1440 1441 return false; 1442 } 1443 #else 1444 static bool __init has_erratum_a4510(void) 1445 { 1446 return false; 1447 } 1448 #endif 1449 1450 static void __init clockgen_init(struct device_node *np) 1451 { 1452 int i, ret; 1453 bool is_old_ls1021a = false; 1454 1455 /* May have already been called by a legacy probe */ 1456 if (clockgen.node) 1457 return; 1458 1459 clockgen.node = np; 1460 clockgen.regs = of_iomap(np, 0); 1461 if (!clockgen.regs && 1462 of_device_is_compatible(of_root, "fsl,ls1021a")) { 1463 /* Compatibility hack for old, broken device trees */ 1464 clockgen.regs = ioremap(0x1ee1000, 0x1000); 1465 is_old_ls1021a = true; 1466 } 1467 if (!clockgen.regs) { 1468 pr_err("%s(): %pOFn: of_iomap() failed\n", __func__, np); 1469 return; 1470 } 1471 1472 for (i = 0; i < ARRAY_SIZE(chipinfo); i++) { 1473 if (of_device_is_compatible(np, chipinfo[i].compat)) 1474 break; 1475 if (is_old_ls1021a && 1476 !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen")) 1477 break; 1478 } 1479 1480 if (i == ARRAY_SIZE(chipinfo)) { 1481 pr_err("%s: unknown clockgen node %pOF\n", __func__, np); 1482 goto err; 1483 } 1484 clockgen.info = chipinfo[i]; 1485 1486 if (clockgen.info.guts_compat) { 1487 struct device_node *guts; 1488 1489 guts = of_find_compatible_node(NULL, NULL, 1490 clockgen.info.guts_compat); 1491 if (guts) { 1492 clockgen.guts = of_iomap(guts, 0); 1493 if (!clockgen.guts) { 1494 pr_err("%s: Couldn't map %pOF regs\n", __func__, 1495 guts); 1496 } 1497 of_node_put(guts); 1498 } 1499 1500 } 1501 1502 if (has_erratum_a4510()) 1503 clockgen.info.flags |= CG_CMUX_GE_PLAT; 1504 1505 clockgen.sysclk = create_sysclk("cg-sysclk"); 1506 clockgen.coreclk = create_coreclk("cg-coreclk"); 1507 create_plls(&clockgen); 1508 create_muxes(&clockgen); 1509 1510 if (clockgen.info.init_periph) 1511 clockgen.info.init_periph(&clockgen); 1512 1513 ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen); 1514 if (ret) { 1515 pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n", 1516 __func__, np, ret); 1517 } 1518 1519 return; 1520 err: 1521 iounmap(clockgen.regs); 1522 clockgen.regs = NULL; 1523 } 1524 1525 CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init); 1526 CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init); 1527 CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init); 1528 CLK_OF_DECLARE(qoriq_clockgen_b4860, "fsl,b4860-clockgen", clockgen_init); 1529 CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init); 1530 CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init); 1531 CLK_OF_DECLARE(qoriq_clockgen_ls1028a, "fsl,ls1028a-clockgen", clockgen_init); 1532 CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init); 1533 CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init); 1534 CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init); 1535 CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init); 1536 CLK_OF_DECLARE(qoriq_clockgen_lx2160a, "fsl,lx2160a-clockgen", clockgen_init); 1537 CLK_OF_DECLARE(qoriq_clockgen_p2041, "fsl,p2041-clockgen", clockgen_init); 1538 CLK_OF_DECLARE(qoriq_clockgen_p3041, "fsl,p3041-clockgen", clockgen_init); 1539 CLK_OF_DECLARE(qoriq_clockgen_p4080, "fsl,p4080-clockgen", clockgen_init); 1540 CLK_OF_DECLARE(qoriq_clockgen_p5020, "fsl,p5020-clockgen", clockgen_init); 1541 CLK_OF_DECLARE(qoriq_clockgen_p5040, "fsl,p5040-clockgen", clockgen_init); 1542 CLK_OF_DECLARE(qoriq_clockgen_t1023, "fsl,t1023-clockgen", clockgen_init); 1543 CLK_OF_DECLARE(qoriq_clockgen_t1040, "fsl,t1040-clockgen", clockgen_init); 1544 CLK_OF_DECLARE(qoriq_clockgen_t2080, "fsl,t2080-clockgen", clockgen_init); 1545 CLK_OF_DECLARE(qoriq_clockgen_t4240, "fsl,t4240-clockgen", clockgen_init); 1546 1547 /* Legacy nodes */ 1548 CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init); 1549 CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init); 1550 CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init); 1551 CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init); 1552 CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init); 1553 CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init); 1554 CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init); 1555 CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init); 1556