/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[4];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * cmux freq must be >= platform pll.
 * If not set, cmux freq must be >= platform pll/2
 */
#define CG_CMUX_GE_PLAT		1

#define CG_PLL_8BIT		2	/* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3			4	/* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN	8

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info; /* mutable copy */
	struct clk *sysclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;

static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
	if (cg->info.flags & CG_LITTLE_ENDIAN)
		iowrite32(val, reg);
	else
		iowrite32be(val, reg);
}

static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
	u32 val;

	if (cg->info.flags & CG_LITTLE_ENDIAN)
		val = ioread32(reg);
	else
		val = ioread32be(reg);

	return val;
}

static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	},
};

#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000

static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&t1023_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls1043a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1043a_hwa1, &ls1043a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls2080a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1
		},
		.pll_mask = 0x1f,
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x0f,
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x03,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{},
};
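
/*
 * Runtime state for one clock mux (a core cmux or an hwaccel mux).
 * The 4-bit clock-select value lives in bits 30:27 of the mux control
 * register; parent_to_clksel[] and clksel_to_parent[] map between that
 * sparse hardware encoding and the dense parent index used by the clk
 * framework.
 */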
struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27

static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx;
	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency.  If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	pct80_rate = clk_get_rate(div->clk);
	pct80_rate *= 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init clockgen_init(struct device_node *np);

/* Legacy nodes may get probed before the parent clockgen node */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node)
		clockgen_init(of_get_parent(np));
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		return;
	}
}

static struct clk __init
*sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}

static struct clk *sysclk_from_parent(const char *name)
{
	struct clk *clk;
	const char *parent_name;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	/* Register the input clock under the desired name. */
	parent_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, parent_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = sysclk_from_parent(name);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input clock\n", __func__);
	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

#define PLL_KILL BIT(31)

static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Get the multiple of PLL */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, "cg-sysclk", 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}
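
/*
 * Look up a clock from a two-cell specifier <type index>:
 * type 0 = sysclk, 1 = core cmux, 2 = hwaccel mux, 3 = FMan clock,
 * 4 = platform PLL divider.
 */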
static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case 0:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case 1:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case 2:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case 3:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case 4:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

static void __init clockgen_init(struct device_node *np)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %s\n", __func__,
		       np->full_name);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %s regs\n", __func__,
				       guts->full_name);
			}
		}

	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, ret);
	}

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);