// SPDX-License-Identifier: GPL-2.0
/*
 * Alchemy clocks.
 *
 * Exposes all configurable internal clock sources to the clk framework.
 *
 * We have:
 *  - Root source, usually 12MHz supplied by an external crystal
 *  - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2]
 *
 * Dividers:
 *  - 6 clock dividers with:
 *    * selectable source [one of the PLLs],
 *    * output divided between [2 .. 512 in steps of 2] (!Au1300)
 *      or [1 .. 256 in steps of 1] (Au1300),
 *    * can be enabled individually.
 *
 *  - up to 6 "internal" (fixed) consumers which:
 *    * take either AUXPLL or one of the above 6 dividers as input,
 *    * divide this input by 1, 2, or 4 (and 3 on Au1300),
 *    * can be disabled separately.
 *
 * Misc clocks:
 *  - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4.
 *    Depends on board design and should be set by bootloader, read-only.
 *  - peripheral clock: half the rate of sysbus clock, source for a lot
 *    of peripheral blocks, read-only.
 *  - memory clock: clk rate to main memory chips, depends on board
 *    design and is read-only.
 *  - lrclk: the static bus clock signal for synchronous operation.
 *    Depends on board design, must be set by bootloader,
 *    but may be required to correctly configure devices attached to
 *    the static bus. The Au1000/1500/1100 manuals call it LCLK, on
 *    later models it's called RCLK.
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/mach-au1x00/au1000.h>

/* Base clock: 12MHz is the default in all databooks, and I haven't
 * found any board yet which uses a different rate.
 */
#define ALCHEMY_ROOTCLK_RATE	12000000

/*
 * the internal sources which can be driven by the PLLs and dividers.
 * Names taken from the databooks, refer to them for more information,
 * especially which ones share a clock line.
 */
static const char * const alchemy_au1300_intclknames[] = {
	"lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk",
	"EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1200_intclknames[] = {
	"lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1550_intclknames[] = {
	"usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko",
	"EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1100_intclknames[] = {
	"usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1500_intclknames[] = {
	NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1000_intclknames[] = {
	"irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0",
	"EXTCLK1"
};

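/*
 * Illustration (not part of the original sources): most clocks registered
 * in this file are also added to clkdev under their own name, so board or
 * driver code can look them up by con_id alone, e.g.
 *
 *	struct clk *c = clk_get(NULL, "usb_clk");
 *	if (!IS_ERR(c)) {
 *		clk_set_rate(c, 48000000);
 *		clk_prepare_enable(c);
 *	}
 *
 * "usb_clk" here is just one of the names from the tables above.
 */
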
/* aliases for a few on-chip sources which are either shared
 * or have gone through name changes.
 */
static struct clk_aliastable {
	char *alias;
	char *base;
	int cputype;
} alchemy_clk_aliases[] __initdata = {
	{ "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
	{ "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
	{ "irda_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
	{ "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
	{ "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
	{ "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 },
	{ "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 },
	{ "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 },
	{ "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 },
	{ "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
	{ "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
	{ "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
	{ "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },

	{ NULL, NULL, 0 },
};

#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(CPHYSADDR(x))))

/* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */
static spinlock_t alchemy_clk_fg0_lock;
static spinlock_t alchemy_clk_fg1_lock;
static spinlock_t alchemy_clk_csrc_lock;

/* CPU Core clock *****************************************************/

static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	unsigned long t;

	/*
	 * On early Au1000, sys_cpupll was write-only. Since these
	 * silicon versions of Au1000 are not sold, we don't bend
	 * over backwards trying to determine the frequency.
	 */
	if (unlikely(au1xxx_cpu_has_pll_wo()))
		t = 396000000;
	else {
		t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
		if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
			t &= 0x3f;
		t *= parent_rate;
	}

	return t;
}

void __init alchemy_set_lpj(void)
{
	preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
	preset_lpj /= 2 * HZ;
}

static const struct clk_ops alchemy_clkops_cpu = {
	.recalc_rate	= alchemy_clk_cpu_recalc,
};

static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
						int ctype)
{
	struct clk_init_data id;
	struct clk_hw *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	id.name = ALCHEMY_CPU_CLK;
	id.parent_names = &parent_name;
	id.num_parents = 1;
	id.flags = CLK_IS_BASIC;
	id.ops = &alchemy_clkops_cpu;
	h->init = &id;

	return clk_register(NULL, h);
}

/* AUXPLLs ************************************************************/

struct alchemy_auxpll_clk {
	struct clk_hw hw;
	unsigned long reg;	/* au1300 has also AUXPLL2 */
	int maxmult;		/* max multiplier */
};
#define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw)

static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);

	return (alchemy_rdsys(a->reg) & 0xff) * parent_rate;
}

static int alchemy_clk_aux_setr(struct clk_hw *hw,
				unsigned long rate,
				unsigned long parent_rate)
{
	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
	unsigned long d = rate;

	if (rate)
		d /= parent_rate;
	else
		d = 0;

	/* minimum is 84MHz, max is 756-1032 depending on variant */
	if (((d < 7) && (d != 0)) || (d > a->maxmult))
		return -EINVAL;

	alchemy_wrsys(d, a->reg);
	return 0;
}

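/*
 * Worked example (illustrative, with the usual 12MHz root clock):
 * requesting 96MHz writes a multiplier of 96 / 12 = 8 to the AUXPLL
 * register; the smallest legal multiplier, 7, gives the 84MHz minimum
 * mentioned above.
 */
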
static long alchemy_clk_aux_roundr(struct clk_hw *hw,
				   unsigned long rate,
				   unsigned long *parent_rate)
{
	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
	unsigned long mult;

	if (!rate || !*parent_rate)
		return 0;

	mult = rate / (*parent_rate);

	if (mult && (mult < 7))
		mult = 7;
	if (mult > a->maxmult)
		mult = a->maxmult;

	return (*parent_rate) * mult;
}

static const struct clk_ops alchemy_clkops_aux = {
	.recalc_rate	= alchemy_clk_aux_recalc,
	.set_rate	= alchemy_clk_aux_setr,
	.round_rate	= alchemy_clk_aux_roundr,
};

static struct clk __init *alchemy_clk_setup_aux(const char *parent_name,
						char *name, int maxmult,
						unsigned long reg)
{
	struct clk_init_data id;
	struct clk *c;
	struct alchemy_auxpll_clk *a;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return ERR_PTR(-ENOMEM);

	id.name = name;
	id.parent_names = &parent_name;
	id.num_parents = 1;
	id.flags = CLK_GET_RATE_NOCACHE;
	id.ops = &alchemy_clkops_aux;

	a->reg = reg;
	a->maxmult = maxmult;
	a->hw.init = &id;

	c = clk_register(NULL, &a->hw);
	if (!IS_ERR(c))
		clk_register_clkdev(c, name, NULL);
	else
		kfree(a);

	return c;
}

/* sysbus_clk *********************************************************/

static struct clk __init *alchemy_clk_setup_sysbus(const char *pn)
{
	unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2;
	struct clk *c;

	c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK,
				      pn, 0, 1, v);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL);
	return c;
}

/* Peripheral Clock ***************************************************/

static struct clk __init *alchemy_clk_setup_periph(const char *pn)
{
	/* Peripheral clock runs at half the rate of sysbus clk */
	struct clk *c;

	c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK,
				      pn, 0, 1, 2);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL);
	return c;
}

/* mem clock **********************************************************/

static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
{
	void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR);
	unsigned long v;
	struct clk *c;
	int div;

	switch (ct) {
	case ALCHEMY_CPU_AU1550:
	case ALCHEMY_CPU_AU1200:
		v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
		div = (v & (1 << 15)) ? 1 : 2;
		break;
	case ALCHEMY_CPU_AU1300:
		v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
		div = (v & (1 << 31)) ? 1 : 2;
		break;
	case ALCHEMY_CPU_AU1000:
	case ALCHEMY_CPU_AU1500:
	case ALCHEMY_CPU_AU1100:
	default:
		div = 2;
		break;
	}

	c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn,
				      0, 1, div);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL);
	return c;
}

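/*
 * Worked example (values are illustrative, not from a specific board):
 * with a 396MHz CPU clock and SYS_POWERCTRL[1:0] = 2, the sysbus clock
 * is 396 / (2 + 2) = 99MHz and the peripheral clock 49.5MHz; the memory
 * clock is either the sysbus rate or half of it, as selected above.
 */
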
/* lrclk: external synchronous static bus clock ***********************/

static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
{
	/* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
	 * otherwise lrclk=pclk/4.
	 * All other variants: MEM_STCFG0[15:13] = divisor.
	 * L/RCLK = periph_clk / (divisor + 1)
	 * On Au1000, Au1500, Au1100 it's called LCLK,
	 * on later models it's called RCLK, but it's the same thing.
	 */
	struct clk *c;
	unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);

	switch (t) {
	case ALCHEMY_CPU_AU1000:
	case ALCHEMY_CPU_AU1500:
		v = 4 + ((v >> 11) & 1);
		break;
	default:	/* all other models */
		v = ((v >> 13) & 7) + 1;
	}
	c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
				      pn, 0, 1, v);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL);
	return c;
}

/* Clock dividers and muxes *******************************************/

/* data for fgen and csrc mux-dividers */
struct alchemy_fgcs_clk {
	struct clk_hw hw;
	spinlock_t *reglock;	/* register lock */
	unsigned long reg;	/* SYS_FREQCTRL0/1 */
	int shift;		/* offset in register */
	int parent;		/* parent before disable [Au1300] */
	int isen;		/* is it enabled? */
	int *dt;		/* dividertable for csrc */
};
#define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw)

static long alchemy_calc_div(unsigned long rate, unsigned long prate,
			     int scale, int maxdiv, unsigned long *rv)
{
	long div1, div2;

	div1 = prate / rate;
	if ((prate / div1) > rate)
		div1++;

	if (scale == 2) {	/* only div-by-multiple-of-2 possible */
		if (div1 & 1)
			div1++;	/* stay <=prate */
	}

	div2 = (div1 / scale) - 1;	/* value to write to register */

	if (div2 > maxdiv)
		div2 = maxdiv;
	if (rv)
		*rv = div2;

	div1 = ((div2 + 1) * scale);
	return div1;
}

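/*
 * Example (illustrative): alchemy_calc_div(12MHz, 96MHz, 2, 512, &rv)
 * computes div1 = 8, stores rv = 3 (the register value, (8 / 2) - 1)
 * and returns 8, the divider the hardware will actually apply.
 */
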
static int alchemy_clk_fgcs_detr(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 int scale, int maxdiv)
{
	struct clk_hw *pc, *bpc, *free;
	long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
	int j;

	lastdiff = INT_MAX;
	bpr = 0;
	bpc = NULL;
	br = -EINVAL;
	free = NULL;

	/* look at the rates each enabled parent supplies and select
	 * the one that gets closest to but not over the requested rate.
	 */
	for (j = 0; j < 7; j++) {
		pc = clk_hw_get_parent_by_index(hw, j);
		if (!pc)
			break;

		/* if this parent is currently unused, remember it.
		 * XXX: we would actually want clk_has_active_children()
		 * but this is a good-enough approximation for now.
		 */
		if (!clk_hw_is_prepared(pc)) {
			if (!free)
				free = pc;
		}

		pr = clk_hw_get_rate(pc);
		if (pr < req->rate)
			continue;

		/* what can hardware actually provide */
		tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
		nr = pr / tdv;
		diff = req->rate - nr;
		if (nr > req->rate)
			continue;

		if (diff < lastdiff) {
			lastdiff = diff;
			bpr = pr;
			bpc = pc;
			br = nr;
		}
		if (diff == 0)
			break;
	}

	/* if we couldn't get the exact rate we wanted from the enabled
	 * parents, maybe we can tell an available disabled/inactive one
	 * to give us a rate we can divide down to the requested rate.
	 */
	if (lastdiff && free) {
		for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) {
			tpr = req->rate * j;
			if (tpr < 0)
				break;
			pr = clk_hw_round_rate(free, tpr);

			tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
					       NULL);
			nr = pr / tdv;
			diff = req->rate - nr;
			if (nr > req->rate)
				continue;
			if (diff < lastdiff) {
				lastdiff = diff;
				bpr = pr;
				bpc = free;
				br = nr;
			}
			if (diff == 0)
				break;
		}
	}

	if (br < 0)
		return br;

	req->best_parent_rate = bpr;
	req->best_parent_hw = bpc;
	req->rate = br;

	return 0;
}

static int alchemy_clk_fgv1_en(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v |= (1 << 1) << c->shift;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static int alchemy_clk_fgv1_isen(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1);

	return v & 1;
}

static void alchemy_clk_fgv1_dis(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~((1 << 1) << c->shift);
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	if (index)
		v |= (1 << c->shift);
	else
		v &= ~(1 << c->shift);
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

	return (alchemy_rdsys(c->reg) >> c->shift) & 1;
}

static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long div, v, flags, ret;
	int sh = c->shift + 2;

	if (!rate || !parent_rate || rate > (parent_rate / 2))
		return -EINVAL;
	ret = alchemy_calc_div(rate, parent_rate, 2, 512, &div);
	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(0xff << sh);
	v |= div << sh;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2);

	v = ((v & 0xff) + 1) * 2;
	return parent_rate / v;
}

static int alchemy_clk_fgv1_detr(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	return alchemy_clk_fgcs_detr(hw, req, 2, 512);
}

/* Au1000, Au1100, Au15x0, Au12x0 */
static const struct clk_ops alchemy_clkops_fgenv1 = {
	.recalc_rate	= alchemy_clk_fgv1_recalc,
	.determine_rate	= alchemy_clk_fgv1_detr,
	.set_rate	= alchemy_clk_fgv1_setr,
	.set_parent	= alchemy_clk_fgv1_setp,
	.get_parent	= alchemy_clk_fgv1_getp,
	.enable		= alchemy_clk_fgv1_en,
	.disable	= alchemy_clk_fgv1_dis,
	.is_enabled	= alchemy_clk_fgv1_isen,
};

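/*
 * Summary of the Au1300 FREQCTRL field layout as used by the fgv2 ops
 * below (derived from the code, see the Au1300 databook for authority):
 * bits [shift+1:shift] select the source (0 = generator disabled),
 * bits [shift+9:shift+2] hold the divider value, and bit 30 is the
 * shared "scale" bit described further down.
 */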
static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c)
{
	unsigned long v = alchemy_rdsys(c->reg);

	v &= ~(3 << c->shift);
	v |= (c->parent & 3) << c->shift;
	alchemy_wrsys(v, c->reg);
	c->isen = 1;
}

static int alchemy_clk_fgv2_en(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	/* enable by setting the previous parent clock */
	spin_lock_irqsave(c->reglock, flags);
	__alchemy_clk_fgv2_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static int alchemy_clk_fgv2_isen(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

	return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0;
}

static void alchemy_clk_fgv2_dis(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(3 << c->shift);	/* set input mux to "disabled" state */
	alchemy_wrsys(v, c->reg);
	c->isen = 0;
	spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	spin_lock_irqsave(c->reglock, flags);
	c->parent = index + 1;	/* value to write to register */
	if (c->isen)
		__alchemy_clk_fgv2_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags, v;

	spin_lock_irqsave(c->reglock, flags);
	v = c->parent - 1;
	spin_unlock_irqrestore(c->reglock, flags);
	return v;
}

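/*
 * Note on the ops above: the source mux field reads back 0 while the
 * generator is disabled, so the last selected parent is cached in
 * c->parent; get_parent reports that cached value and enable restores
 * it, instead of relying on the (then meaningless) register contents.
 */
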
/* fg0-2 and fg4-6 share a "scale"-bit. With this bit cleared, the
 * dividers behave exactly as on previous models (dividers are multiples
 * of 2); with the bit set, dividers are multiples of 1, halving their
 * range, but making them also much more flexible.
 */
static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int sh = c->shift + 2;
	unsigned long div, v, flags, ret;

	if (!rate || !parent_rate || rate > parent_rate)
		return -EINVAL;

	v = alchemy_rdsys(c->reg) & (1 << 30);	/* test "scale" bit */
	ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2,
			       v ? 256 : 512, &div);

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(0xff << sh);
	v |= (div & 0xff) << sh;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int sh = c->shift + 2;
	unsigned long v, t;

	v = alchemy_rdsys(c->reg);
	t = parent_rate / (((v >> sh) & 0xff) + 1);
	if ((v & (1 << 30)) == 0)	/* test scale bit */
		t /= 2;

	return t;
}

static int alchemy_clk_fgv2_detr(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int scale, maxdiv;

	if (alchemy_rdsys(c->reg) & (1 << 30)) {
		scale = 1;
		maxdiv = 256;
	} else {
		scale = 2;
		maxdiv = 512;
	}

	return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv);
}

/* Au1300 larger input mux, no separate disable bit, flexible divider */
static const struct clk_ops alchemy_clkops_fgenv2 = {
	.recalc_rate	= alchemy_clk_fgv2_recalc,
	.determine_rate	= alchemy_clk_fgv2_detr,
	.set_rate	= alchemy_clk_fgv2_setr,
	.set_parent	= alchemy_clk_fgv2_setp,
	.get_parent	= alchemy_clk_fgv2_getp,
	.enable		= alchemy_clk_fgv2_en,
	.disable	= alchemy_clk_fgv2_dis,
	.is_enabled	= alchemy_clk_fgv2_isen,
};

static const char * const alchemy_clk_fgv1_parents[] = {
	ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
};

static const char * const alchemy_clk_fgv2_parents[] = {
	ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
};

static const char * const alchemy_clk_fgen_names[] = {
	ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
	ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK };

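/*
 * Register assignment used below: fg0-fg2 live in SYS_FREQCTRL0 and
 * fg3-fg5 in SYS_FREQCTRL1, each in a 10-bit field starting at bit 0,
 * 10 or 20. As an illustration of the scale bit described above:
 * asking for 132MHz from a 396MHz parent yields exactly 396 / 3 = 132MHz
 * with the scale bit set, but only 396 / 4 = 99MHz with it cleared,
 * where the divider must be a multiple of 2.
 */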
static int __init alchemy_clk_init_fgens(int ctype)
{
	struct clk *c;
	struct clk_init_data id;
	struct alchemy_fgcs_clk *a;
	unsigned long v;
	int i, ret;

	switch (ctype) {
	case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200:
		id.ops = &alchemy_clkops_fgenv1;
		id.parent_names = alchemy_clk_fgv1_parents;
		id.num_parents = 2;
		break;
	case ALCHEMY_CPU_AU1300:
		id.ops = &alchemy_clkops_fgenv2;
		id.parent_names = alchemy_clk_fgv2_parents;
		id.num_parents = 3;
		break;
	default:
		return -ENODEV;
	}
	id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;

	a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	spin_lock_init(&alchemy_clk_fg0_lock);
	spin_lock_init(&alchemy_clk_fg1_lock);
	ret = 0;
	for (i = 0; i < 6; i++) {
		id.name = alchemy_clk_fgen_names[i];
		a->shift = 10 * (i < 3 ? i : i - 3);
		if (i > 2) {
			a->reg = AU1000_SYS_FREQCTRL1;
			a->reglock = &alchemy_clk_fg1_lock;
		} else {
			a->reg = AU1000_SYS_FREQCTRL0;
			a->reglock = &alchemy_clk_fg0_lock;
		}

		/* default to first parent if bootloader has set
		 * the mux to disabled state.
		 */
		if (ctype == ALCHEMY_CPU_AU1300) {
			v = alchemy_rdsys(a->reg);
			a->parent = (v >> a->shift) & 3;
			if (!a->parent) {
				a->parent = 1;
				a->isen = 0;
			} else
				a->isen = 1;
		}

		a->hw.init = &id;
		c = clk_register(NULL, &a->hw);
		if (IS_ERR(c))
			ret++;
		else
			clk_register_clkdev(c, id.name, NULL);
		a++;
	}

	return ret;
}

/* internal sources muxes *********************************************/

static int alchemy_clk_csrc_isen(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = alchemy_rdsys(c->reg);

	return (((v >> c->shift) >> 2) & 7) != 0;
}

static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c)
{
	unsigned long v = alchemy_rdsys(c->reg);

	v &= ~((7 << 2) << c->shift);
	v |= ((c->parent & 7) << 2) << c->shift;
	alchemy_wrsys(v, c->reg);
	c->isen = 1;
}

static int alchemy_clk_csrc_en(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	/* enable by setting the previous parent clock */
	spin_lock_irqsave(c->reglock, flags);
	__alchemy_clk_csrc_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static void alchemy_clk_csrc_dis(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~((3 << 2) << c->shift);	/* mux to "disabled" state */
	alchemy_wrsys(v, c->reg);
	c->isen = 0;
	spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	spin_lock_irqsave(c->reglock, flags);
	c->parent = index + 1;	/* value to write to register */
	if (c->isen)
		__alchemy_clk_csrc_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static u8 alchemy_clk_csrc_getp(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

	return c->parent - 1;
}

static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3;

	return parent_rate / c->dt[v];
}

static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long d, v, flags;
	int i;

	if (!rate || !parent_rate || rate > parent_rate)
		return -EINVAL;

	d = (parent_rate + (rate / 2)) / rate;
	if (d > 4)
		return -EINVAL;
	if ((d == 3) && (c->dt[2] != 3))
		d = 4;

	for (i = 0; i < 4; i++)
		if (c->dt[i] == d)
			break;

	if (i >= 4)
		return -EINVAL;	/* oops */

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(3 << c->shift);
	v |= (i & 3) << c->shift;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

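/*
 * Each source in SYS_CLKSRC occupies 5 bits: bits [shift+1:shift] index
 * the divider table and bits [shift+4:shift+2] select the input
 * (0 = disabled). Illustration with the non-Au1300 table { 1, 4, 1, 2 }:
 * requesting 24MHz from a 96MHz generator gives d = (96 + 12) / 24 = 4
 * (integer division), which matches dt[1], so divider index 1 is
 * programmed and recalc then reports 96 / 4 = 24MHz.
 */
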
static int alchemy_clk_csrc_detr(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int scale = c->dt[2] == 3 ? 1 : 2;	/* au1300 check */

	return alchemy_clk_fgcs_detr(hw, req, scale, 4);
}

static const struct clk_ops alchemy_clkops_csrc = {
	.recalc_rate	= alchemy_clk_csrc_recalc,
	.determine_rate	= alchemy_clk_csrc_detr,
	.set_rate	= alchemy_clk_csrc_setr,
	.set_parent	= alchemy_clk_csrc_setp,
	.get_parent	= alchemy_clk_csrc_getp,
	.enable		= alchemy_clk_csrc_en,
	.disable	= alchemy_clk_csrc_dis,
	.is_enabled	= alchemy_clk_csrc_isen,
};

static const char * const alchemy_clk_csrc_parents[] = {
	/* disabled at index 0 */ ALCHEMY_AUXPLL_CLK,
	ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
	ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK
};

/* divider tables */
static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 };	/* rest */
static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 };	/* Au1300 */

static int __init alchemy_clk_setup_imux(int ctype)
{
	struct alchemy_fgcs_clk *a;
	const char * const *names;
	struct clk_init_data id;
	unsigned long v;
	int i, ret, *dt;
	struct clk *c;

	id.ops = &alchemy_clkops_csrc;
	id.parent_names = alchemy_clk_csrc_parents;
	id.num_parents = 7;
	id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;

	dt = alchemy_csrc_dt1;
	switch (ctype) {
	case ALCHEMY_CPU_AU1000:
		names = alchemy_au1000_intclknames;
		break;
	case ALCHEMY_CPU_AU1500:
		names = alchemy_au1500_intclknames;
		break;
	case ALCHEMY_CPU_AU1100:
		names = alchemy_au1100_intclknames;
		break;
	case ALCHEMY_CPU_AU1550:
		names = alchemy_au1550_intclknames;
		break;
	case ALCHEMY_CPU_AU1200:
		names = alchemy_au1200_intclknames;
		break;
	case ALCHEMY_CPU_AU1300:
		dt = alchemy_csrc_dt2;
		names = alchemy_au1300_intclknames;
		break;
	default:
		return -ENODEV;
	}

	a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	spin_lock_init(&alchemy_clk_csrc_lock);
	ret = 0;

	for (i = 0; i < 6; i++) {
		id.name = names[i];
		if (!id.name)
			goto next;

		a->shift = i * 5;
		a->reg = AU1000_SYS_CLKSRC;
		a->reglock = &alchemy_clk_csrc_lock;
		a->dt = dt;

		/* default to first parent clock if mux is initially
		 * set to disabled state.
		 */
		v = alchemy_rdsys(a->reg);
		a->parent = ((v >> a->shift) >> 2) & 7;
		if (!a->parent) {
			a->parent = 1;
			a->isen = 0;
		} else
			a->isen = 1;

		a->hw.init = &id;
		c = clk_register(NULL, &a->hw);
		if (IS_ERR(c))
			ret++;
		else
			clk_register_clkdev(c, id.name, NULL);
next:
		a++;
	}

	return ret;
}


/**********************************************************************/


#define ERRCK(x)						\
	if (IS_ERR(x)) {					\
		ret = PTR_ERR(x);				\
		goto out;					\
	}

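/*
 * Registration order below, roughly mirroring the tree described in the
 * header comment: root (12MHz) feeds the CPU PLL; the CPU clock feeds
 * sysbus, which feeds the peripheral and memory clocks, with lrclk
 * derived from the peripheral clock. The root also feeds AUXPLL (and
 * AUXPLL2 on Au1300); the six frequency generators take the CPU PLL or
 * AUXPLL(2) as input, and the internal source muxes take AUXPLL or one
 * of the generators.
 */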
static int __init alchemy_clk_init(void)
{
	int ctype = alchemy_get_cputype(), ret, i;
	struct clk_aliastable *t = alchemy_clk_aliases;
	struct clk *c;

	/* Root of the Alchemy clock tree: external 12MHz crystal osc */
	c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
				    0, ALCHEMY_ROOTCLK_RATE);
	ERRCK(c)

	/* CPU core clock */
	c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype);
	ERRCK(c)

	/* AUXPLLs: max 1GHz on Au1300, 756MHz on older models */
	i = (ctype == ALCHEMY_CPU_AU1300) ? 84 : 63;
	c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK,
				  i, AU1000_SYS_AUXPLL);
	ERRCK(c)

	if (ctype == ALCHEMY_CPU_AU1300) {
		c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK,
					  ALCHEMY_AUXPLL2_CLK, i,
					  AU1300_SYS_AUXPLL2);
		ERRCK(c)
	}

	/* sysbus clock: cpu core clock divided by 2, 3 or 4 */
	c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK);
	ERRCK(c)

	/* peripheral clock: runs at half rate of sysbus clk */
	c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK);
	ERRCK(c)

	/* SDR/DDR memory clock */
	c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype);
	ERRCK(c)

	/* L/RCLK: external static bus clock for synchronous mode */
	c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
	ERRCK(c)

	/* Frequency dividers 0-5 */
	ret = alchemy_clk_init_fgens(ctype);
	if (ret) {
		ret = -ENODEV;
		goto out;
	}

	/* divider muxes for internal sources */
	ret = alchemy_clk_setup_imux(ctype);
	if (ret) {
		ret = -ENODEV;
		goto out;
	}

	/* set up aliases drivers might look for */
	while (t->base) {
		if (t->cputype == ctype)
			clk_add_alias(t->alias, NULL, t->base, NULL);
		t++;
	}

	pr_info("Alchemy clocktree installed\n");
	return 0;

out:
	return ret;
}
postcore_initcall(alchemy_clk_init);