/*
 * Alchemy clocks.
 *
 * Exposes all configurable internal clock sources to the clk framework.
 *
 * We have:
 *  - Root source, usually 12MHz supplied by an external crystal
 *  - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2]
 *
 * Dividers:
 *  - 6 clock dividers with:
 *    * selectable source [one of the PLLs],
 *    * output divided between [2 .. 512 in steps of 2] (!Au1300)
 *      or [1 .. 256 in steps of 1] (Au1300),
 *    * can be enabled individually.
 *
 *  - up to 6 "internal" (fixed) consumers which:
 *    * take either AUXPLL or one of the above 6 dividers as input,
 *    * divide this input by 1, 2, or 4 (and 3 on Au1300),
 *    * can be disabled separately.
 *
 * Misc clocks:
 *  - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4;
 *    depends on board design and should be set by bootloader, read-only.
 *  - peripheral clock: half the rate of sysbus clock, source for a lot
 *    of peripheral blocks, read-only.
 *  - memory clock: clk rate to main memory chips, depends on board
 *    design and is read-only.
 *  - lrclk: the static bus clock signal for synchronous operation;
 *    depends on board design, must be set by bootloader,
 *    but may be required to correctly configure devices attached to
 *    the static bus. The Au1000/1500/1100 manuals call it LCLK, on
 *    later models it's called RCLK.
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/mach-au1x00/au1000.h>

/* Base clock: 12MHz is the default in all databooks, and I haven't
 * found any board yet which uses a different rate.
 */
#define ALCHEMY_ROOTCLK_RATE	12000000

/*
 * the internal sources which can be driven by the PLLs and dividers.
 * Names taken from the databooks, refer to them for more information,
 * especially which ones share a clock line.
 */
static const char * const alchemy_au1300_intclknames[] = {
	"lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk",
	"EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1200_intclknames[] = {
	"lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1550_intclknames[] = {
	"usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko",
	"EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1100_intclknames[] = {
	"usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1500_intclknames[] = {
	NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1000_intclknames[] = {
	"irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0",
	"EXTCLK1"
};
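/* All clocks registered in this file are also exposed through clkdev with
 * a NULL dev_id, so peripheral code can look them up by the names above.
 * Illustrative sketch only (not part of this driver, rate is arbitrary):
 *
 *	struct clk *c = clk_get(NULL, "lcd_intclk");
 *	if (!IS_ERR(c)) {
 *		clk_set_rate(c, clk_round_rate(c, 48000000));
 *		clk_prepare_enable(c);
 *	}
 */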
/* aliases for a few on-chip sources which are either shared
 * or have gone through name changes.
 */
static struct clk_aliastable {
	char *alias;
	char *base;
	int cputype;
} alchemy_clk_aliases[] __initdata = {
	{ "usbh_clk", "usb_clk",    ALCHEMY_CPU_AU1100 },
	{ "usbd_clk", "usb_clk",    ALCHEMY_CPU_AU1100 },
	{ "irda_clk", "usb_clk",    ALCHEMY_CPU_AU1100 },
	{ "usbh_clk", "usb_clk",    ALCHEMY_CPU_AU1550 },
	{ "usbd_clk", "usb_clk",    ALCHEMY_CPU_AU1550 },
	{ "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 },
	{ "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 },
	{ "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 },
	{ "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 },
	{ "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
	{ "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
	{ "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
	{ "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },

	{ NULL, NULL, 0 },
};

#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(CPHYSADDR(x))))

/* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */
static spinlock_t alchemy_clk_fg0_lock;
static spinlock_t alchemy_clk_fg1_lock;
static spinlock_t alchemy_clk_csrc_lock;

/* CPU Core clock *****************************************************/

static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	unsigned long t;

	/*
	 * On early Au1000, sys_cpupll was write-only. Since these
	 * silicon versions of Au1000 are not sold, we don't bend
	 * over backwards trying to determine the frequency.
	 */
	if (unlikely(au1xxx_cpu_has_pll_wo()))
		t = 396000000;
	else {
		t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
		if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
			t &= 0x3f;
		t *= parent_rate;
	}

	return t;
}

void __init alchemy_set_lpj(void)
{
	preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
	preset_lpj /= 2 * HZ;
}

static struct clk_ops alchemy_clkops_cpu = {
	.recalc_rate	= alchemy_clk_cpu_recalc,
};

static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
						int ctype)
{
	struct clk_init_data id;
	struct clk_hw *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	id.name = ALCHEMY_CPU_CLK;
	id.parent_names = &parent_name;
	id.num_parents = 1;
	id.flags = CLK_IS_BASIC;
	id.ops = &alchemy_clkops_cpu;
	h->init = &id;

	return clk_register(NULL, h);
}

/* AUXPLLs ************************************************************/

struct alchemy_auxpll_clk {
	struct clk_hw hw;
	unsigned long reg;	/* au1300 has also AUXPLL2 */
	int maxmult;		/* max multiplier */
};
#define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw)

static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);

	return (alchemy_rdsys(a->reg) & 0xff) * parent_rate;
}

static int alchemy_clk_aux_setr(struct clk_hw *hw,
				unsigned long rate,
				unsigned long parent_rate)
{
	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
	unsigned long d = rate;

	if (rate)
		d /= parent_rate;
	else
		d = 0;

	/* minimum is 84MHz, max is 756-1032 depending on variant */
	if (((d < 7) && (d != 0)) || (d > a->maxmult))
		return -EINVAL;

	alchemy_wrsys(d, a->reg);
	return 0;
}
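/* Worked example (illustrative): with the 12MHz root clock, the multiplier
 * written to the AUXPLL register must be 0 (PLL stopped) or lie in the
 * range 7..maxmult, i.e. 84MHz up to 756MHz (maxmult 63) or roughly 1GHz
 * (maxmult 84 on Au1300).  A request for 96MHz programs 96 / 12 = 8.
 */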
static long alchemy_clk_aux_roundr(struct clk_hw *hw,
				   unsigned long rate,
				   unsigned long *parent_rate)
{
	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
	unsigned long mult;

	if (!rate || !*parent_rate)
		return 0;

	mult = rate / (*parent_rate);

	if (mult && (mult < 7))
		mult = 7;
	if (mult > a->maxmult)
		mult = a->maxmult;

	return (*parent_rate) * mult;
}

static struct clk_ops alchemy_clkops_aux = {
	.recalc_rate	= alchemy_clk_aux_recalc,
	.set_rate	= alchemy_clk_aux_setr,
	.round_rate	= alchemy_clk_aux_roundr,
};

static struct clk __init *alchemy_clk_setup_aux(const char *parent_name,
						char *name, int maxmult,
						unsigned long reg)
{
	struct clk_init_data id;
	struct clk *c;
	struct alchemy_auxpll_clk *a;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return ERR_PTR(-ENOMEM);

	id.name = name;
	id.parent_names = &parent_name;
	id.num_parents = 1;
	id.flags = CLK_GET_RATE_NOCACHE;
	id.ops = &alchemy_clkops_aux;

	a->reg = reg;
	a->maxmult = maxmult;
	a->hw.init = &id;

	c = clk_register(NULL, &a->hw);
	if (!IS_ERR(c))
		clk_register_clkdev(c, name, NULL);
	else
		kfree(a);

	return c;
}

/* sysbus_clk *********************************************************/

static struct clk __init *alchemy_clk_setup_sysbus(const char *pn)
{
	unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2;
	struct clk *c;

	c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK,
				      pn, 0, 1, v);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL);
	return c;
}

/* Peripheral Clock ***************************************************/

static struct clk __init *alchemy_clk_setup_periph(const char *pn)
{
	/* Peripheral clock runs at half the rate of sysbus clk */
	struct clk *c;

	c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK,
				      pn, 0, 1, 2);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL);
	return c;
}
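/* Worked example (illustrative figures): SYS_POWERCTRL[1:0] holds the
 * sysbus divisor minus 2.  With a 396MHz core clock and the field set to
 * 1, sysbus runs at 396 / 3 = 132MHz and the peripheral clock at
 * 132 / 2 = 66MHz.
 */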
/* mem clock **********************************************************/

static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
{
	void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR);
	unsigned long v;
	struct clk *c;
	int div;

	switch (ct) {
	case ALCHEMY_CPU_AU1550:
	case ALCHEMY_CPU_AU1200:
		v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
		div = (v & (1 << 15)) ? 1 : 2;
		break;
	case ALCHEMY_CPU_AU1300:
		v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
		div = (v & (1 << 31)) ? 1 : 2;
		break;
	case ALCHEMY_CPU_AU1000:
	case ALCHEMY_CPU_AU1500:
	case ALCHEMY_CPU_AU1100:
	default:
		div = 2;
		break;
	}

	c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn,
				      0, 1, div);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL);
	return c;
}

/* lrclk: external synchronous static bus clock ***********************/

static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
{
	/* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
	 * otherwise lrclk=pclk/4.
	 * All other variants: MEM_STCFG0[15:13] = divisor.
	 * L/RCLK = periph_clk / (divisor + 1)
	 * On Au1000, Au1500, Au1100 it's called LCLK,
	 * on later models it's called RCLK, but it's the same thing.
	 */
	struct clk *c;
	unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);

	switch (t) {
	case ALCHEMY_CPU_AU1000:
	case ALCHEMY_CPU_AU1500:
		v = 4 + ((v >> 11) & 1);
		break;
	default:	/* all other models */
		v = ((v >> 13) & 7) + 1;
	}
	c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
				      pn, 0, 1, v);
	if (!IS_ERR(c))
		clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL);
	return c;
}

/* Clock dividers and muxes *******************************************/

/* data for fgen and csrc mux-dividers */
struct alchemy_fgcs_clk {
	struct clk_hw hw;
	spinlock_t *reglock;	/* register lock */
	unsigned long reg;	/* SYS_FREQCTRL0/1 */
	int shift;		/* offset in register */
	int parent;		/* parent before disable [Au1300] */
	int isen;		/* is it enabled? */
	int *dt;		/* dividertable for csrc */
};
#define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw)

static long alchemy_calc_div(unsigned long rate, unsigned long prate,
			     int scale, int maxdiv, unsigned long *rv)
{
	long div1, div2;

	div1 = prate / rate;
	if ((prate / div1) > rate)
		div1++;

	if (scale == 2) {	/* only div-by-multiple-of-2 possible */
		if (div1 & 1)
			div1++;	/* stay <=prate */
	}

	div2 = (div1 / scale) - 1;	/* value to write to register */

	if (div2 > maxdiv)
		div2 = maxdiv;
	if (rv)
		*rv = div2;

	div1 = ((div2 + 1) * scale);
	return div1;
}
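/* Worked example for alchemy_calc_div() (illustrative numbers): asking
 * for 48MHz from a 396MHz parent with scale=2 gives div1 = 396/48 = 8,
 * bumped to 9 so the result does not exceed the request and then to 10
 * to keep it even; div2 = 10/2 - 1 = 4 is the register value, and the
 * achieved divider is (4 + 1) * 2 = 10, i.e. 39.6MHz.
 */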
static int alchemy_clk_fgcs_detr(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 int scale, int maxdiv)
{
	struct clk_hw *pc, *bpc, *free;
	long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
	int j;

	lastdiff = INT_MAX;
	bpr = 0;
	bpc = NULL;
	br = -EINVAL;
	free = NULL;

	/* look at the rates each enabled parent supplies and select
	 * the one that gets closest to but not over the requested rate.
	 */
	for (j = 0; j < 7; j++) {
		pc = clk_hw_get_parent_by_index(hw, j);
		if (!pc)
			break;

		/* if this parent is currently unused, remember it.
		 * XXX: we would actually want clk_has_active_children()
		 * but this is a good-enough approximation for now.
		 */
		if (!clk_hw_is_prepared(pc)) {
			if (!free)
				free = pc;
		}

		pr = clk_hw_get_rate(pc);
		if (pr < req->rate)
			continue;

		/* what can hardware actually provide */
		tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
		nr = pr / tdv;
		diff = req->rate - nr;
		if (nr > req->rate)
			continue;

		if (diff < lastdiff) {
			lastdiff = diff;
			bpr = pr;
			bpc = pc;
			br = nr;
		}
		if (diff == 0)
			break;
	}

	/* if we couldn't get the exact rate we wanted from the enabled
	 * parents, maybe we can tell an available disabled/inactive one
	 * to give us a rate we can divide down to the requested rate.
	 */
	if (lastdiff && free) {
		for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) {
			tpr = req->rate * j;
			if (tpr < 0)
				break;
			pr = clk_hw_round_rate(free, tpr);

			tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
					       NULL);
			nr = pr / tdv;
			diff = req->rate - nr;
			if (nr > req->rate)
				continue;
			if (diff < lastdiff) {
				lastdiff = diff;
				bpr = pr;
				bpc = free;
				br = nr;
			}
			if (diff == 0)
				break;
		}
	}

	if (br < 0)
		return br;

	req->best_parent_rate = bpr;
	req->best_parent_hw = bpc;
	req->rate = br;

	return 0;
}

static int alchemy_clk_fgv1_en(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v |= (1 << 1) << c->shift;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static int alchemy_clk_fgv1_isen(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1);

	return v & 1;
}

static void alchemy_clk_fgv1_dis(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~((1 << 1) << c->shift);
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	if (index)
		v |= (1 << c->shift);
	else
		v &= ~(1 << c->shift);
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

	return (alchemy_rdsys(c->reg) >> c->shift) & 1;
}

static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long div, v, flags, ret;
	int sh = c->shift + 2;

	if (!rate || !parent_rate || rate > (parent_rate / 2))
		return -EINVAL;
	ret = alchemy_calc_div(rate, parent_rate, 2, 512, &div);
	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(0xff << sh);
	v |= div << sh;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2);

	v = ((v & 0xff) + 1) * 2;
	return parent_rate / v;
}

static int alchemy_clk_fgv1_detr(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	return alchemy_clk_fgcs_detr(hw, req, 2, 512);
}

/* Au1000, Au1100, Au15x0, Au12x0 */
static struct clk_ops alchemy_clkops_fgenv1 = {
	.recalc_rate	= alchemy_clk_fgv1_recalc,
	.determine_rate	= alchemy_clk_fgv1_detr,
	.set_rate	= alchemy_clk_fgv1_setr,
	.set_parent	= alchemy_clk_fgv1_setp,
	.get_parent	= alchemy_clk_fgv1_getp,
	.enable		= alchemy_clk_fgv1_en,
	.disable	= alchemy_clk_fgv1_dis,
	.is_enabled	= alchemy_clk_fgv1_isen,
};
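/* Field layout used by the fgv1 callbacks above (per generator): each
 * divider occupies 10 bits of SYS_FREQCTRL0/1 starting at c->shift;
 * bit 0 selects the source PLL, bit 1 is the clock enable, and bits 2-9
 * hold the divider value (output = parent / (2 * (value + 1))).
 */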
static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c)
{
	unsigned long v = alchemy_rdsys(c->reg);

	v &= ~(3 << c->shift);
	v |= (c->parent & 3) << c->shift;
	alchemy_wrsys(v, c->reg);
	c->isen = 1;
}

static int alchemy_clk_fgv2_en(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	/* enable by setting the previous parent clock */
	spin_lock_irqsave(c->reglock, flags);
	__alchemy_clk_fgv2_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static int alchemy_clk_fgv2_isen(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

	return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0;
}

static void alchemy_clk_fgv2_dis(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(3 << c->shift);	/* set input mux to "disabled" state */
	alchemy_wrsys(v, c->reg);
	c->isen = 0;
	spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	spin_lock_irqsave(c->reglock, flags);
	c->parent = index + 1;	/* value to write to register */
	if (c->isen)
		__alchemy_clk_fgv2_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags, v;

	spin_lock_irqsave(c->reglock, flags);
	v = c->parent - 1;
	spin_unlock_irqrestore(c->reglock, flags);
	return v;
}

/* fg0-2 and fg4-6 share a "scale"-bit. With this bit cleared, the
 * dividers behave exactly as on previous models (dividers are multiples
 * of 2); with the bit set, dividers are multiples of 1, halving their
 * range, but making them also much more flexible.
 */
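/* Example (illustrative): with a divider register value of 3 the output
 * is parent / 8 while the scale bit is cleared (2 * (3 + 1)), but
 * parent / 4 (3 + 1) once the scale bit is set; see
 * alchemy_clk_fgv2_recalc() below.
 */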
static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int sh = c->shift + 2;
	unsigned long div, v, flags, ret;

	if (!rate || !parent_rate || rate > parent_rate)
		return -EINVAL;

	v = alchemy_rdsys(c->reg) & (1 << 30);	/* test "scale" bit */
	ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2,
			       v ? 256 : 512, &div);

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(0xff << sh);
	v |= (div & 0xff) << sh;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int sh = c->shift + 2;
	unsigned long v, t;

	v = alchemy_rdsys(c->reg);
	t = parent_rate / (((v >> sh) & 0xff) + 1);
	if ((v & (1 << 30)) == 0)	/* test scale bit */
		t /= 2;

	return t;
}

static int alchemy_clk_fgv2_detr(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int scale, maxdiv;

	if (alchemy_rdsys(c->reg) & (1 << 30)) {
		scale = 1;
		maxdiv = 256;
	} else {
		scale = 2;
		maxdiv = 512;
	}

	return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv);
}

/* Au1300 larger input mux, no separate disable bit, flexible divider */
static struct clk_ops alchemy_clkops_fgenv2 = {
	.recalc_rate	= alchemy_clk_fgv2_recalc,
	.determine_rate	= alchemy_clk_fgv2_detr,
	.set_rate	= alchemy_clk_fgv2_setr,
	.set_parent	= alchemy_clk_fgv2_setp,
	.get_parent	= alchemy_clk_fgv2_getp,
	.enable		= alchemy_clk_fgv2_en,
	.disable	= alchemy_clk_fgv2_dis,
	.is_enabled	= alchemy_clk_fgv2_isen,
};

static const char * const alchemy_clk_fgv1_parents[] = {
	ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
};

static const char * const alchemy_clk_fgv2_parents[] = {
	ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
};

static const char * const alchemy_clk_fgen_names[] = {
	ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
	ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK };

static int __init alchemy_clk_init_fgens(int ctype)
{
	struct clk *c;
	struct clk_init_data id;
	struct alchemy_fgcs_clk *a;
	unsigned long v;
	int i, ret;

	switch (ctype) {
	case ALCHEMY_CPU_AU1000 ... ALCHEMY_CPU_AU1200:
		id.ops = &alchemy_clkops_fgenv1;
		id.parent_names = alchemy_clk_fgv1_parents;
		id.num_parents = 2;
		break;
	case ALCHEMY_CPU_AU1300:
		id.ops = &alchemy_clkops_fgenv2;
		id.parent_names = alchemy_clk_fgv2_parents;
		id.num_parents = 3;
		break;
	default:
		return -ENODEV;
	}
	id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;

	a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	spin_lock_init(&alchemy_clk_fg0_lock);
	spin_lock_init(&alchemy_clk_fg1_lock);
	ret = 0;
	for (i = 0; i < 6; i++) {
		id.name = alchemy_clk_fgen_names[i];
		a->shift = 10 * (i < 3 ? i : i - 3);
		if (i > 2) {
			a->reg = AU1000_SYS_FREQCTRL1;
			a->reglock = &alchemy_clk_fg1_lock;
		} else {
			a->reg = AU1000_SYS_FREQCTRL0;
			a->reglock = &alchemy_clk_fg0_lock;
		}

		/* default to first parent if bootloader has set
		 * the mux to disabled state.
		 */
		if (ctype == ALCHEMY_CPU_AU1300) {
			v = alchemy_rdsys(a->reg);
			a->parent = (v >> a->shift) & 3;
			if (!a->parent) {
				a->parent = 1;
				a->isen = 0;
			} else
				a->isen = 1;
		}

		a->hw.init = &id;
		c = clk_register(NULL, &a->hw);
		if (IS_ERR(c))
			ret++;
		else
			clk_register_clkdev(c, id.name, NULL);
		a++;
	}

	return ret;
}

/* internal sources muxes *********************************************/

static int alchemy_clk_csrc_isen(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = alchemy_rdsys(c->reg);

	return (((v >> c->shift) >> 2) & 7) != 0;
}

static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c)
{
	unsigned long v = alchemy_rdsys(c->reg);

	v &= ~((7 << 2) << c->shift);
	v |= ((c->parent & 7) << 2) << c->shift;
	alchemy_wrsys(v, c->reg);
	c->isen = 1;
}

static int alchemy_clk_csrc_en(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	/* enable by setting the previous parent clock */
	spin_lock_irqsave(c->reglock, flags);
	__alchemy_clk_csrc_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static void alchemy_clk_csrc_dis(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v, flags;

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~((7 << 2) << c->shift);	/* clear whole 3-bit mux: "disabled" */
	alchemy_wrsys(v, c->reg);
	c->isen = 0;
	spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long flags;

	spin_lock_irqsave(c->reglock, flags);
	c->parent = index + 1;	/* value to write to register */
	if (c->isen)
		__alchemy_clk_csrc_en(c);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}

static u8 alchemy_clk_csrc_getp(struct clk_hw *hw)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

	return c->parent - 1;
}

static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3;

	return parent_rate / c->dt[v];
}

static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	unsigned long d, v, flags;
	int i;

	if (!rate || !parent_rate || rate > parent_rate)
		return -EINVAL;

	d = (parent_rate + (rate / 2)) / rate;
	if (d > 4)
		return -EINVAL;
	if ((d == 3) && (c->dt[2] != 3))
		d = 4;

	for (i = 0; i < 4; i++)
		if (c->dt[i] == d)
			break;

	if (i >= 4)
		return -EINVAL;	/* oops */

	spin_lock_irqsave(c->reglock, flags);
	v = alchemy_rdsys(c->reg);
	v &= ~(3 << c->shift);
	v |= (i & 3) << c->shift;
	alchemy_wrsys(v, c->reg);
	spin_unlock_irqrestore(c->reglock, flags);

	return 0;
}
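/* Note on the divider tables used above (alchemy_csrc_dt1/2 below): the
 * 2-bit divider field selects /1, /4, /3 or /2, but /3 only exists on
 * Au1300.  alchemy_clk_csrc_setr() therefore bumps a requested /3 to /4
 * on the other models, so asking for a third of the parent rate there
 * yields parent/4 instead.
 */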
static int alchemy_clk_csrc_detr(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
	int scale = c->dt[2] == 3 ? 1 : 2;	/* au1300 check */

	return alchemy_clk_fgcs_detr(hw, req, scale, 4);
}

static struct clk_ops alchemy_clkops_csrc = {
	.recalc_rate	= alchemy_clk_csrc_recalc,
	.determine_rate	= alchemy_clk_csrc_detr,
	.set_rate	= alchemy_clk_csrc_setr,
	.set_parent	= alchemy_clk_csrc_setp,
	.get_parent	= alchemy_clk_csrc_getp,
	.enable		= alchemy_clk_csrc_en,
	.disable	= alchemy_clk_csrc_dis,
	.is_enabled	= alchemy_clk_csrc_isen,
};

static const char * const alchemy_clk_csrc_parents[] = {
	/* disabled at index 0 */ ALCHEMY_AUXPLL_CLK,
	ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
	ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK
};

/* divider tables */
static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 };	/* rest */
static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 };	/* Au1300 */

static int __init alchemy_clk_setup_imux(int ctype)
{
	struct alchemy_fgcs_clk *a;
	const char * const *names;
	struct clk_init_data id;
	unsigned long v;
	int i, ret, *dt;
	struct clk *c;

	id.ops = &alchemy_clkops_csrc;
	id.parent_names = alchemy_clk_csrc_parents;
	id.num_parents = 7;
	id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;

	dt = alchemy_csrc_dt1;
	switch (ctype) {
	case ALCHEMY_CPU_AU1000:
		names = alchemy_au1000_intclknames;
		break;
	case ALCHEMY_CPU_AU1500:
		names = alchemy_au1500_intclknames;
		break;
	case ALCHEMY_CPU_AU1100:
		names = alchemy_au1100_intclknames;
		break;
	case ALCHEMY_CPU_AU1550:
		names = alchemy_au1550_intclknames;
		break;
	case ALCHEMY_CPU_AU1200:
		names = alchemy_au1200_intclknames;
		break;
	case ALCHEMY_CPU_AU1300:
		dt = alchemy_csrc_dt2;
		names = alchemy_au1300_intclknames;
		break;
	default:
		return -ENODEV;
	}

	a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	spin_lock_init(&alchemy_clk_csrc_lock);
	ret = 0;

	for (i = 0; i < 6; i++) {
		id.name = names[i];
		if (!id.name)
			goto next;

		a->shift = i * 5;
		a->reg = AU1000_SYS_CLKSRC;
		a->reglock = &alchemy_clk_csrc_lock;
		a->dt = dt;

		/* default to first parent clock if mux is initially
		 * set to disabled state.
		 */
		v = alchemy_rdsys(a->reg);
		a->parent = ((v >> a->shift) >> 2) & 7;
		if (!a->parent) {
			a->parent = 1;
			a->isen = 0;
		} else
			a->isen = 1;

		a->hw.init = &id;
		c = clk_register(NULL, &a->hw);
		if (IS_ERR(c))
			ret++;
		else
			clk_register_clkdev(c, id.name, NULL);
next:
		a++;
	}

	return ret;
}


/**********************************************************************/


#define ERRCK(x)				\
	if (IS_ERR(x)) {			\
		ret = PTR_ERR(x);		\
		goto out;			\
	}
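/* Registration order below follows the clock tree: the root crystal is
 * registered first, then the PLLs derived from it, then sysbus, periph,
 * mem and lrclk, and finally the frequency generators and internal
 * source muxes, so each clock can find its parent by name at
 * registration time.
 */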
static int __init alchemy_clk_init(void)
{
	int ctype = alchemy_get_cputype(), ret, i;
	struct clk_aliastable *t = alchemy_clk_aliases;
	struct clk *c;

	/* Root of the Alchemy clock tree: external 12MHz crystal osc */
	c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
				    0, ALCHEMY_ROOTCLK_RATE);
	ERRCK(c)

	/* CPU core clock */
	c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype);
	ERRCK(c)

	/* AUXPLLs: max 1GHz on Au1300, 756MHz on older models */
	i = (ctype == ALCHEMY_CPU_AU1300) ? 84 : 63;
	c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK,
				  i, AU1000_SYS_AUXPLL);
	ERRCK(c)

	if (ctype == ALCHEMY_CPU_AU1300) {
		c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK,
					  ALCHEMY_AUXPLL2_CLK, i,
					  AU1300_SYS_AUXPLL2);
		ERRCK(c)
	}

	/* sysbus clock: cpu core clock divided by 2, 3 or 4 */
	c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK);
	ERRCK(c)

	/* peripheral clock: runs at half rate of sysbus clk */
	c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK);
	ERRCK(c)

	/* SDR/DDR memory clock */
	c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype);
	ERRCK(c)

	/* L/RCLK: external static bus clock for synchronous mode */
	c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
	ERRCK(c)

	/* Frequency dividers 0-5 */
	ret = alchemy_clk_init_fgens(ctype);
	if (ret) {
		ret = -ENODEV;
		goto out;
	}

	/* dividing muxes for internal sources */
	ret = alchemy_clk_setup_imux(ctype);
	if (ret) {
		ret = -ENODEV;
		goto out;
	}

	/* set up aliases drivers might look for */
	while (t->base) {
		if (t->cputype == ctype)
			clk_add_alias(t->alias, NULL, t->base, NULL);
		t++;
	}

	pr_info("Alchemy clocktree installed\n");
	return 0;

out:
	return ret;
}
postcore_initcall(alchemy_clk_init);