/*
 * Alchemy clocks.
 *
 * Exposes all configurable internal clock sources to the clk framework.
 *
 * We have:
 *  - Root source, usually 12MHz supplied by an external crystal
 *  - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2]
 *
 * Dividers:
 *  - 6 clock dividers with:
 *    * selectable source [one of the PLLs],
 *    * output divided between [2 .. 512 in steps of 2] (!Au1300)
 *      or [1 .. 256 in steps of 1] (Au1300),
 *    * can be enabled individually.
 *
 *  - up to 6 "internal" (fixed) consumers which:
 *    * take either AUXPLL or one of the above 6 dividers as input,
 *    * divide this input by 1, 2, or 4 (and 3 on Au1300).
 *    * can be disabled separately.
 *
 * Misc clocks:
 *  - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4.
 *    depends on board design and should be set by bootloader, read-only.
 *  - peripheral clock: half the rate of sysbus clock, source for a lot
 *    of peripheral blocks, read-only.
 *  - memory clock: clk rate to main memory chips, depends on board
 *    design and is read-only,
 *  - lrclk: the static bus clock signal for synchronous operation.
 *    depends on board design, must be set by bootloader,
 *    but may be required to correctly configure devices attached to
 *    the static bus. The Au1000/1500/1100 manuals call it LCLK, on
 *    later models it's called RCLK.
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/mach-au1x00/au1000.h>

/* Base clock: 12MHz is the default in all databooks, and I haven't
 * found any board yet which uses a different rate.
 */
#define ALCHEMY_ROOTCLK_RATE	12000000

/*
 * the internal sources which can be driven by the PLLs and dividers.
 * Names taken from the databooks, refer to them for more information,
 * especially which ones share a clock line.
 */
static const char * const alchemy_au1300_intclknames[] = {
	"lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk",
	"EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1200_intclknames[] = {
	"lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1550_intclknames[] = {
	"usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko",
	"EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1100_intclknames[] = {
	"usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1500_intclknames[] = {
	NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1000_intclknames[] = {
	"irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0",
	"EXTCLK1"
};
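/* All clocks set up below are registered with clkdev using their name
 * as the connection id and no device, so - purely as an illustration,
 * "psc0_intclk" and the rates are not tied to any particular board -
 * a consumer could look one of them up like this:
 *
 *	struct clk *c = clk_get(NULL, "psc0_intclk");
 *	if (!IS_ERR(c)) {
 *		clk_set_rate(c, clk_round_rate(c, 48000000));
 *		clk_prepare_enable(c);
 *	}
 */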
/* aliases for a few on-chip sources which are either shared
 * or have gone through name changes.
 */
static struct clk_aliastable {
	char *alias;
	char *base;
	int cputype;
} alchemy_clk_aliases[] __initdata = {
	{ "usbh_clk", "usb_clk",    ALCHEMY_CPU_AU1100 },
	{ "usbd_clk", "usb_clk",    ALCHEMY_CPU_AU1100 },
	{ "irda_clk", "usb_clk",    ALCHEMY_CPU_AU1100 },
	{ "usbh_clk", "usb_clk",    ALCHEMY_CPU_AU1550 },
	{ "usbd_clk", "usb_clk",    ALCHEMY_CPU_AU1550 },
	{ "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 },
	{ "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 },
	{ "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 },
	{ "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 },
	{ "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
	{ "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
	{ "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
	{ "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },

	{ NULL, NULL, 0 },
};

#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(CPHYSADDR(x))))

/* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */
static spinlock_t alchemy_clk_fg0_lock;
static spinlock_t alchemy_clk_fg1_lock;
static spinlock_t alchemy_clk_csrc_lock;

/* CPU Core clock *****************************************************/

static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	unsigned long t;

	/*
	 * On early Au1000, sys_cpupll was write-only. Since these
	 * silicon versions of Au1000 are not sold, we don't bend
	 * over backwards trying to determine the frequency.
	 */
	if (unlikely(au1xxx_cpu_has_pll_wo()))
		t = 396000000;
	else {
		t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
		t *= parent_rate;
	}

	return t;
}

static struct clk_ops alchemy_clkops_cpu = {
	.recalc_rate	= alchemy_clk_cpu_recalc,
};

static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
						int ctype)
{
	struct clk_init_data id;
	struct clk_hw *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	id.name = ALCHEMY_CPU_CLK;
	id.parent_names = &parent_name;
	id.num_parents = 1;
	id.flags = CLK_IS_BASIC;
	id.ops = &alchemy_clkops_cpu;
	h->init = &id;

	return clk_register(NULL, h);
}

/* AUXPLLs ************************************************************/

struct alchemy_auxpll_clk {
	struct clk_hw hw;
	unsigned long reg;	/* au1300 also has AUXPLL2 */
	int maxmult;		/* max multiplier */
};
#define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw)

static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);

	return (alchemy_rdsys(a->reg) & 0xff) * parent_rate;
}

static int alchemy_clk_aux_setr(struct clk_hw *hw,
				unsigned long rate,
				unsigned long parent_rate)
{
	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
	unsigned long d = rate;

	if (rate)
		d /= parent_rate;
	else
		d = 0;

	/* minimum is 84MHz, max is 756-1032 depending on variant */
	if (((d < 7) && (d != 0)) || (d > a->maxmult))
		return -EINVAL;

	alchemy_wrsys(d, a->reg);
	return 0;
}
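/* Worked example for the PLL programming above (the numbers are purely
 * illustrative, not from a specific board): with the 12MHz root clock
 * as parent, clk_set_rate(auxpll, 96000000) computes d = 96000000 /
 * 12000000 = 8, which passes the d >= 7 check and is written to the
 * PLL register, giving 8 * 12MHz = 96MHz.  Multipliers below 7 (i.e.
 * output below 84MHz) or above a->maxmult are rejected.
 */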
static long alchemy_clk_aux_roundr(struct clk_hw *hw,
				   unsigned long rate,
				   unsigned long *parent_rate)
{
	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
	unsigned long mult;

	if (!rate || !*parent_rate)
		return 0;

	mult = rate / (*parent_rate);

	if (mult && (mult < 7))
		mult = 7;
	if (mult > a->maxmult)
		mult = a->maxmult;

	return (*parent_rate) * mult;
}

static struct clk_ops alchemy_clkops_aux = {
	.recalc_rate	= alchemy_clk_aux_recalc,
	.set_rate	= alchemy_clk_aux_setr,
	.round_rate	= alchemy_clk_aux_roundr,
};

static struct clk __init *alchemy_clk_setup_aux(const char *parent_name,
						char *name, int maxmult,
						unsigned long reg)
{
	struct clk_init_data id;
	struct clk *c;
	struct alchemy_auxpll_clk *a;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return ERR_PTR(-ENOMEM);

	id.name = name;
	id.parent_names = &parent_name;
	id.num_parents = 1;
	id.flags = CLK_GET_RATE_NOCACHE;
	id.ops = &alchemy_clkops_aux;

	a->reg = reg;
	a->maxmult = maxmult;
	a->hw.init = &id;

	c = clk_register(NULL, &a->hw);
	if (!IS_ERR(c))
		clk_register_clkdev(c, name, NULL);
	else
		kfree(a);

	return c;
}

/* sysbus_clk *********************************************************/

static struct clk __init *alchemy_clk_setup_sysbus(const char *pn)
{
	unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2;
	struct clk *c;

	c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK,
				      pn, 0, 1, v);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL);
	return c;
}

/* Peripheral Clock ***************************************************/

static struct clk __init *alchemy_clk_setup_periph(const char *pn)
{
	/* Peripheral clock runs at half the rate of sysbus clk */
	struct clk *c;

	c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK,
				      pn, 0, 1, 2);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL);
	return c;
}

/* mem clock **********************************************************/

static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
{
	void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR);
	unsigned long v;
	struct clk *c;
	int div;

	switch (ct) {
	case ALCHEMY_CPU_AU1550:
	case ALCHEMY_CPU_AU1200:
		v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
		div = (v & (1 << 15)) ? 1 : 2;
		break;
	case ALCHEMY_CPU_AU1300:
		v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
		div = (v & (1 << 31)) ? 1 : 2;
		break;
	case ALCHEMY_CPU_AU1000:
	case ALCHEMY_CPU_AU1500:
	case ALCHEMY_CPU_AU1100:
	default:
		div = 2;
		break;
	}

	c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn,
				      0, 1, div);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL);
	return c;
}

/* lrclk: external synchronous static bus clock ***********************/

static struct clk __init *alchemy_clk_setup_lrclk(const char *pn)
{
	/* MEM_STCFG0[15:13] = divisor.
	 * L/RCLK = periph_clk / (divisor + 1)
	 * On Au1000, Au1500, Au1100 it's called LCLK,
	 * on later models it's called RCLK, but it's the same thing.
	 */
	struct clk *c;
	unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13;

	v = (v & 7) + 1;
	c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
				      pn, 0, 1, v);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL);
	return c;
}
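/* Example of the fixed-factor chain set up above, with purely
 * illustrative numbers (396MHz core clock, SYS_POWERCTRL[1:0] = 0,
 * MEM_STCFG0[15:13] = 2):
 *	sysbus = 396MHz / 2 = 198MHz
 *	periph = 198MHz / 2 =  99MHz
 *	lrclk  =  99MHz / (2 + 1) = 33MHz
 */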
/* Clock dividers and muxes *******************************************/

/* data for fgen and csrc mux-dividers */
struct alchemy_fgcs_clk {
	struct clk_hw hw;
	spinlock_t *reglock;	/* register lock		  */
	unsigned long reg;	/* SYS_FREQCTRL0/1		  */
	int shift;		/* offset in register		  */
	int parent;		/* parent before disable [Au1300] */
	int isen;		/* is it enabled?		  */
	int *dt;		/* dividertable for csrc	  */
};
#define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw)

static long alchemy_calc_div(unsigned long rate, unsigned long prate,
			     int scale, int maxdiv, unsigned long *rv)
{
	long div1, div2;

	div1 = prate / rate;
	if ((prate / div1) > rate)
		div1++;

	if (scale == 2) {	/* only div-by-multiple-of-2 possible */
		if (div1 & 1)
			div1++;	/* stay <=prate */
	}

	div2 = (div1 / scale) - 1;	/* value to write to register */

	if (div2 > maxdiv)
		div2 = maxdiv;
	if (rv)
		*rv = div2;

	div1 = ((div2 + 1) * scale);
	return div1;
}
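/* Worked example for alchemy_calc_div() (illustrative numbers only):
 * for rate = 20MHz, prate = 96MHz, scale = 2, maxdiv = 512:
 *	div1 = 96 / 20 = 4, bumped to 5 because 96MHz / 4 still exceeds
 *	       the requested rate, then to 6 to stay a multiple of 2,
 *	div2 = 6 / 2 - 1 = 2 is the value to program into the register,
 * and the returned divider is (2 + 1) * 2 = 6, i.e. a 16MHz output.
 */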
static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate,
				  unsigned long *best_parent_rate,
				  struct clk_hw **best_parent_clk,
				  int scale, int maxdiv)
{
	struct clk *pc, *bpc, *free;
	long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
	int j;

	lastdiff = INT_MAX;
	bpr = 0;
	bpc = NULL;
	br = -EINVAL;
	free = NULL;

	/* look at the rates each enabled parent supplies and select
	 * the one that gets closest to but not over the requested rate.
	 */
	for (j = 0; j < 7; j++) {
		pc = clk_get_parent_by_index(hw->clk, j);
		if (!pc)
			break;

		/* if this parent is currently unused, remember it.
		 * XXX: we would actually want clk_has_active_children()
		 * but this is a good-enough approximation for now.
		 */
		if (!__clk_is_prepared(pc)) {
			if (!free)
				free = pc;
		}

		pr = clk_get_rate(pc);
		if (pr < rate)
			continue;

		/* what can hardware actually provide */
		tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL);
		nr = pr / tdv;
		diff = rate - nr;
		if (nr > rate)
			continue;

		if (diff < lastdiff) {
			lastdiff = diff;
			bpr = pr;
			bpc = pc;
			br = nr;
		}
		if (diff == 0)
			break;
	}

	/* if we couldn't get the exact rate we wanted from the enabled
	 * parents, maybe we can tell an available disabled/inactive one
	 * to give us a rate we can divide down to the requested rate.
	 */
	if (lastdiff && free) {
		for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) {
			tpr = rate * j;
			if (tpr < 0)
				break;
			pr = clk_round_rate(free, tpr);

			tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL);
			nr = pr / tdv;
			diff = rate - nr;
			if (nr > rate)
				continue;
			if (diff < lastdiff) {
				lastdiff = diff;
				bpr = pr;
				bpc = free;
				br = nr;
			}
			if (diff == 0)
				break;
		}
	}

	*best_parent_rate = bpr;
	*best_parent_clk = __clk_get_hw(bpc);
	return br;
}

static int alchemy_clk_fgv1_en(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v |= (1 << 1) << c->shift;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static int alchemy_clk_fgv1_isen(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1);

	return v & 1;
}

static void alchemy_clk_fgv1_dis(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~((1 << 1) << c->shift);
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	if (index)
		v |= (1 << c->shift);
	else
		v &= ~(1 << c->shift);
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

	return (alchemy_rdsys(c->reg) >> c->shift) & 1;
}

static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long div, v, flags, ret;
	int sh = c->shift + 2;

	if (!rate || !parent_rate || rate > (parent_rate / 2))
		return -EINVAL;
	ret = alchemy_calc_div(rate, parent_rate, 2, 512, &div);
	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(0xff << sh);
	v |= div << sh;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2);

	v = ((v & 0xff) + 1) * 2;
	return parent_rate / v;
}

static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate,
				  unsigned long *best_parent_rate,
				  struct clk_hw **best_parent_clk)
{
	return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate,
				     best_parent_clk, 2, 512);
}

/* Au1000, Au1100, Au15x0, Au12x0 */
static struct clk_ops alchemy_clkops_fgenv1 = {
	.recalc_rate	= alchemy_clk_fgv1_recalc,
	.determine_rate	= alchemy_clk_fgv1_detr,
	.set_rate	= alchemy_clk_fgv1_setr,
	.set_parent	= alchemy_clk_fgv1_setp,
	.get_parent	= alchemy_clk_fgv1_getp,
	.enable		= alchemy_clk_fgv1_en,
	.disable	= alchemy_clk_fgv1_dis,
	.is_enabled	= alchemy_clk_fgv1_isen,
};
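/* SYS_FREQCTRL0/1 layout as used by the handlers above and below (a
 * summary of the code here, refer to the databooks for the
 * authoritative description): each generator owns a 10-bit field
 * starting at its 'shift'.  On the pre-Au1300 models (fgenv1) bit 0 of
 * the field selects the source PLL, bit 1 is the enable bit and bits
 * 2-9 hold the divider.  On the Au1300 (fgenv2) bits 0-1 form the
 * source mux (0 = disabled) and bits 2-9 hold the divider; register
 * bit 30 is the shared "scale" bit described further down.
 */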
static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c)
{
	unsigned long v = alchemy_rdsys(c->reg);

	v &= ~(3 << c->shift);
	v |= (c->parent & 3) << c->shift;
	alchemy_wrsys(v, c->reg);
	c->isen = 1;
}

static int alchemy_clk_fgv2_en(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	/* enable by setting the previous parent clock */
	spin_lock_irqsave(c->reglock, flags);
	__alchemy_clk_fgv2_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static int alchemy_clk_fgv2_isen(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

	return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0;
}

static void alchemy_clk_fgv2_dis(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(3 << c->shift);	/* set input mux to "disabled" state */
	alchemy_wrsys(v, c->reg);
	c->isen = 0;
	spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	spin_lock_irqsave(c->reglock, flags);
	c->parent = index + 1;	/* value to write to register */
	if (c->isen)
		__alchemy_clk_fgv2_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags, v;

	spin_lock_irqsave(c->reglock, flags);
	v = c->parent - 1;
	spin_unlock_irqrestore(c->reglock, flags);
	return v;
}

/* fg0-2 and fg4-6 share a "scale"-bit. With this bit cleared, the
 * dividers behave exactly as on previous models (dividers are multiples
 * of 2); with the bit set, dividers are multiples of 1, halving their
 * range, but making them also much more flexible.
 */
static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int sh = c->shift + 2;
	unsigned long div, v, flags, ret;

	if (!rate || !parent_rate || rate > parent_rate)
		return -EINVAL;

	v = alchemy_rdsys(c->reg) & (1 << 30);	/* test "scale" bit */
	ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2,
			       v ? 256 : 512, &div);

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(0xff << sh);
	v |= (div & 0xff) << sh;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}
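/* Illustration of the scale bit's effect on a divider field (derived
 * from the handlers here, not a databook table): a field value of 3
 * divides the parent by (3 + 1) * 2 = 8 with the scale bit cleared,
 * but only by 3 + 1 = 4 with it set.
 */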
static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int sh = c->shift + 2;
	unsigned long v, t;

	v = alchemy_rdsys(c->reg);
	t = parent_rate / (((v >> sh) & 0xff) + 1);
	if ((v & (1 << 30)) == 0)	/* test scale bit */
		t /= 2;

	return t;
}

static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate,
				  unsigned long *best_parent_rate,
				  struct clk_hw **best_parent_clk)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int scale, maxdiv;

	if (alchemy_rdsys(c->reg) & (1 << 30)) {
		scale = 1;
		maxdiv = 256;
	} else {
		scale = 2;
		maxdiv = 512;
	}

	return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate,
				     best_parent_clk, scale, maxdiv);
}

/* Au1300 larger input mux, no separate disable bit, flexible divider */
static struct clk_ops alchemy_clkops_fgenv2 = {
	.recalc_rate	= alchemy_clk_fgv2_recalc,
	.determine_rate	= alchemy_clk_fgv2_detr,
	.set_rate	= alchemy_clk_fgv2_setr,
	.set_parent	= alchemy_clk_fgv2_setp,
	.get_parent	= alchemy_clk_fgv2_getp,
	.enable		= alchemy_clk_fgv2_en,
	.disable	= alchemy_clk_fgv2_dis,
	.is_enabled	= alchemy_clk_fgv2_isen,
};

static const char * const alchemy_clk_fgv1_parents[] = {
	ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
};

static const char * const alchemy_clk_fgv2_parents[] = {
	ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
};

static const char * const alchemy_clk_fgen_names[] = {
	ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
	ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK };

static int __init alchemy_clk_init_fgens(int ctype)
{
	struct clk *c;
	struct clk_init_data id;
	struct alchemy_fgcs_clk *a;
	unsigned long v;
	int i, ret;

	switch (ctype) {
	case ALCHEMY_CPU_AU1000 ... ALCHEMY_CPU_AU1200:
		id.ops = &alchemy_clkops_fgenv1;
		id.parent_names = (const char **)alchemy_clk_fgv1_parents;
		id.num_parents = 2;
		break;
	case ALCHEMY_CPU_AU1300:
		id.ops = &alchemy_clkops_fgenv2;
		id.parent_names = (const char **)alchemy_clk_fgv2_parents;
		id.num_parents = 3;
		break;
	default:
		return -ENODEV;
	}
	id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;

	a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	spin_lock_init(&alchemy_clk_fg0_lock);
	spin_lock_init(&alchemy_clk_fg1_lock);
	ret = 0;
	for (i = 0; i < 6; i++) {
		id.name = alchemy_clk_fgen_names[i];
		a->shift = 10 * (i < 3 ? i : i - 3);
		if (i > 2) {
			a->reg = AU1000_SYS_FREQCTRL1;
			a->reglock = &alchemy_clk_fg1_lock;
		} else {
			a->reg = AU1000_SYS_FREQCTRL0;
			a->reglock = &alchemy_clk_fg0_lock;
		}

		/* default to first parent if bootloader has set
		 * the mux to disabled state.
		 */
		if (ctype == ALCHEMY_CPU_AU1300) {
			v = alchemy_rdsys(a->reg);
			a->parent = (v >> a->shift) & 3;
			if (!a->parent) {
				a->parent = 1;
				a->isen = 0;
			} else
				a->isen = 1;
		}

		a->hw.init = &id;
		c = clk_register(NULL, &a->hw);
		if (IS_ERR(c))
			ret++;
		else
			clk_register_clkdev(c, id.name, NULL);
		a++;
	}

	return ret;
}

/* internal sources muxes *********************************************/
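/* Each of the 6 internal consumers owns a 5-bit field in SYS_CLKSRC
 * (a summary of the accessors below, see the databooks for details):
 * bits 0-1 select the divider, looked up in the dt[] tables further
 * down, and bits 2-4 select the source, where 0 means "disabled",
 * 1 is AUXPLL and 2-7 are the frequency generators FG0-FG5.
 */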
static int alchemy_clk_csrc_isen(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = alchemy_rdsys(c->reg);

	return (((v >> c->shift) >> 2) & 7) != 0;
}

static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c)
{
	unsigned long v = alchemy_rdsys(c->reg);

	v &= ~((7 << 2) << c->shift);
	v |= ((c->parent & 7) << 2) << c->shift;
	alchemy_wrsys(v, c->reg);
	c->isen = 1;
}

static int alchemy_clk_csrc_en(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	/* enable by setting the previous parent clock */
	spin_lock_irqsave(c->reglock, flags);
	__alchemy_clk_csrc_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static void alchemy_clk_csrc_dis(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~((3 << 2) << c->shift);	/* mux to "disabled" state */
	alchemy_wrsys(v, c->reg);
	c->isen = 0;
	spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	spin_lock_irqsave(c->reglock, flags);
	c->parent = index + 1;	/* value to write to register */
	if (c->isen)
		__alchemy_clk_csrc_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static u8 alchemy_clk_csrc_getp(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

	return c->parent - 1;
}

static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3;

	return parent_rate / c->dt[v];
}

static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long d, v, flags;
	int i;

	if (!rate || !parent_rate || rate > parent_rate)
		return -EINVAL;

	d = (parent_rate + (rate / 2)) / rate;
	if (d > 4)
		return -EINVAL;
	if ((d == 3) && (c->dt[2] != 3))
		d = 4;

	for (i = 0; i < 4; i++)
		if (c->dt[i] == d)
			break;

	if (i >= 4)
		return -EINVAL;	/* oops */

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(3 << c->shift);
	v |= (i & 3) << c->shift;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}
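/* Note on the set_rate handler above (a summary of the code, not of
 * the databooks): the requested divider is rounded to the nearest
 * integer and must match an entry of the dt[] table in use, i.e.
 * 1, 2 or 4 everywhere, plus 3 on the Au1300; a requested divide-by-3
 * on the older models is therefore bumped to divide-by-4.
 */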
static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate,
				  unsigned long *best_parent_rate,
				  struct clk_hw **best_parent_clk)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int scale = c->dt[2] == 3 ? 1 : 2;	/* au1300 check */

	return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate,
				     best_parent_clk, scale, 4);
}

static struct clk_ops alchemy_clkops_csrc = {
	.recalc_rate	= alchemy_clk_csrc_recalc,
	.determine_rate	= alchemy_clk_csrc_detr,
	.set_rate	= alchemy_clk_csrc_setr,
	.set_parent	= alchemy_clk_csrc_setp,
	.get_parent	= alchemy_clk_csrc_getp,
	.enable		= alchemy_clk_csrc_en,
	.disable	= alchemy_clk_csrc_dis,
	.is_enabled	= alchemy_clk_csrc_isen,
};

static const char * const alchemy_clk_csrc_parents[] = {
	/* disabled at index 0 */ ALCHEMY_AUXPLL_CLK,
	ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
	ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK
};

/* divider tables */
static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 };	/* rest */
static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 };	/* Au1300 */

static int __init alchemy_clk_setup_imux(int ctype)
{
	struct alchemy_fgcs_clk *a;
	const char * const *names;
	struct clk_init_data id;
	unsigned long v;
	int i, ret, *dt;
	struct clk *c;

	id.ops = &alchemy_clkops_csrc;
	id.parent_names = (const char **)alchemy_clk_csrc_parents;
	id.num_parents = 7;
	id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;

	dt = alchemy_csrc_dt1;
	switch (ctype) {
	case ALCHEMY_CPU_AU1000:
		names = alchemy_au1000_intclknames;
		break;
	case ALCHEMY_CPU_AU1500:
		names = alchemy_au1500_intclknames;
		break;
	case ALCHEMY_CPU_AU1100:
		names = alchemy_au1100_intclknames;
		break;
	case ALCHEMY_CPU_AU1550:
		names = alchemy_au1550_intclknames;
		break;
	case ALCHEMY_CPU_AU1200:
		names = alchemy_au1200_intclknames;
		break;
	case ALCHEMY_CPU_AU1300:
		dt = alchemy_csrc_dt2;
		names = alchemy_au1300_intclknames;
		break;
	default:
		return -ENODEV;
	}

	a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	spin_lock_init(&alchemy_clk_csrc_lock);
	ret = 0;

	for (i = 0; i < 6; i++) {
		id.name = names[i];
		if (!id.name)
			goto next;

		a->shift = i * 5;
		a->reg = AU1000_SYS_CLKSRC;
		a->reglock = &alchemy_clk_csrc_lock;
		a->dt = dt;

		/* default to first parent clock if mux is initially
		 * set to disabled state.
		 */
		v = alchemy_rdsys(a->reg);
		a->parent = ((v >> a->shift) >> 2) & 7;
		if (!a->parent) {
			a->parent = 1;
			a->isen = 0;
		} else
			a->isen = 1;

		a->hw.init = &id;
		c = clk_register(NULL, &a->hw);
		if (IS_ERR(c))
			ret++;
		else
			clk_register_clkdev(c, id.name, NULL);
next:
		a++;
	}

	return ret;
}


/**********************************************************************/


#define ERRCK(x)						\
	if (IS_ERR(x)) {					\
		ret = PTR_ERR(x);				\
		goto out;					\
	}
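/* Registration order below follows the clock tree: the external root
 * crystal first, then the CPU PLL and AUXPLL(s) derived from it, the
 * read-only sysbus/peripheral/memory/lrclk fixed factors, and finally
 * the six frequency generators and the internal-source muxes on top
 * of those.
 */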
static int __init alchemy_clk_init(void)
{
	int ctype = alchemy_get_cputype(), ret, i;
	struct clk_aliastable *t = alchemy_clk_aliases;
	struct clk *c;

	/* Root of the Alchemy clock tree: external 12MHz crystal osc */
	c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
				    CLK_IS_ROOT,
				    ALCHEMY_ROOTCLK_RATE);
	ERRCK(c)

	/* CPU core clock */
	c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype);
	ERRCK(c)

	/* AUXPLLs: max 1GHz on Au1300, 756MHz on older models */
	i = (ctype == ALCHEMY_CPU_AU1300) ? 84 : 63;
	c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK,
				  i, AU1000_SYS_AUXPLL);
	ERRCK(c)

	if (ctype == ALCHEMY_CPU_AU1300) {
		c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK,
					  ALCHEMY_AUXPLL2_CLK, i,
					  AU1300_SYS_AUXPLL2);
		ERRCK(c)
	}

	/* sysbus clock: cpu core clock divided by 2, 3 or 4 */
	c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK);
	ERRCK(c)

	/* peripheral clock: runs at half rate of sysbus clk */
	c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK);
	ERRCK(c)

	/* SDR/DDR memory clock */
	c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype);
	ERRCK(c)

	/* L/RCLK: external static bus clock for synchronous mode */
	c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK);
	ERRCK(c)

	/* Frequency dividers 0-5 */
	ret = alchemy_clk_init_fgens(ctype);
	if (ret) {
		ret = -ENODEV;
		goto out;
	}

	/* divider muxes for internal sources */
	ret = alchemy_clk_setup_imux(ctype);
	if (ret) {
		ret = -ENODEV;
		goto out;
	}

	/* set up aliases drivers might look for */
	while (t->base) {
		if (t->cputype == ctype)
			clk_add_alias(t->alias, NULL, t->base, NULL);
		t++;
	}

	pr_info("Alchemy clocktree installed\n");
	return 0;

out:
	return ret;
}
postcore_initcall(alchemy_clk_init);