/*
 * Clock initialization for OMAP4
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 *
 * Based on previous work by:
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *	Rajendra Nayak <rnayak@ti.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <i2c.h>
#include <asm/omap_common.h>
#include <asm/gpio.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/utils.h>
#include <asm/omap_gpio.h>
#include <asm/emif.h>

#ifndef CONFIG_SPL_BUILD
/*
 * printing to console doesn't work unless
 * this code is executed from SPL
 */
#define printf(fmt, args...)
#define puts(s)
#endif

const u32 sys_clk_array[8] = {
	12000000,	/* 12 MHz */
	20000000,	/* 20 MHz */
	16800000,	/* 16.8 MHz */
	19200000,	/* 19.2 MHz */
	26000000,	/* 26 MHz */
	27000000,	/* 27 MHz */
	38400000,	/* 38.4 MHz */
};

static inline u32 __get_sys_clk_index(void)
{
	s8 ind;
	/*
	 * For ES1 the ROM code calibration of the sys clock is not reliable
	 * due to a hw issue, so use a hard-coded value. If this value is not
	 * correct for some board, override this function in the board file.
	 * From ES2.0 onwards this information is available in CM_SYS_CLKSEL.
	 */
	if (omap_revision() == OMAP4430_ES1_0)
		ind = OMAP_SYS_CLK_IND_38_4_MHZ;
	else {
		/* SYS_CLKSEL - 1 to match the dpll param array indices */
		ind = (readl((*prcm)->cm_sys_clksel) &
		       CM_SYS_CLKSEL_SYS_CLKSEL_MASK) - 1;
	}
	return ind;
}

u32 get_sys_clk_index(void)
	__attribute__ ((weak, alias("__get_sys_clk_index")));

u32 get_sys_clk_freq(void)
{
	u8 index = get_sys_clk_index();
	return sys_clk_array[index];
}

void setup_post_dividers(u32 const base, const struct dpll_params *params)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	/* Setup post-dividers */
	if (params->m2 >= 0)
		writel(params->m2, &dpll_regs->cm_div_m2_dpll);
	if (params->m3 >= 0)
		writel(params->m3, &dpll_regs->cm_div_m3_dpll);
	if (params->m4_h11 >= 0)
		writel(params->m4_h11, &dpll_regs->cm_div_m4_h11_dpll);
	if (params->m5_h12 >= 0)
		writel(params->m5_h12, &dpll_regs->cm_div_m5_h12_dpll);
	if (params->m6_h13 >= 0)
		writel(params->m6_h13, &dpll_regs->cm_div_m6_h13_dpll);
	if (params->m7_h14 >= 0)
		writel(params->m7_h14, &dpll_regs->cm_div_m7_h14_dpll);
	if (params->h21 >= 0)
		writel(params->h21, &dpll_regs->cm_div_h21_dpll);
	if (params->h22 >= 0)
		writel(params->h22, &dpll_regs->cm_div_h22_dpll);
	if (params->h23 >= 0)
		writel(params->h23, &dpll_regs->cm_div_h23_dpll);
	if (params->h24 >= 0)
		writel(params->h24, &dpll_regs->cm_div_h24_dpll);
}

static inline void do_bypass_dpll(u32 const base)
{
	struct dpll_regs *dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_FAST_RELOCK_BYPASS <<
			CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_bypass(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, 0, &dpll_regs->cm_idlest_dpll,
			   LDELAY)) {
		printf("Bypassing DPLL failed %x\n", base);
	}
}
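/*
 * DPLLs in this file are reprogrammed with a common handshake (see
 * do_setup_dpll() below): request bypass and poll until ST_DPLL_CLK
 * clears, update M/N and the post-dividers, then request lock and poll
 * until ST_DPLL_CLK sets. An illustrative sketch using the helpers
 * defined later in this file:
 *
 *	bypass_dpll(base);	- do_bypass_dpll() + wait_for_bypass()
 *	... program CM_CLKSEL_DPLL and the post-dividers ...
 *	lock_dpll(base);	- do_lock_dpll() + wait_for_lock()
 */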
static inline void do_lock_dpll(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_LOCK << CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, ST_DPLL_CLK_MASK,
			   &dpll_regs->cm_idlest_dpll, LDELAY)) {
		printf("DPLL locking failed for %x\n", base);
		hang();
	}
}

inline u32 check_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
	u32 lock = readl(&dpll_regs->cm_idlest_dpll) & ST_DPLL_CLK_MASK;

	return lock;
}

const struct dpll_params *get_mpu_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->mpu[sysclk_ind];
}

const struct dpll_params *get_core_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->core[sysclk_ind];
}

const struct dpll_params *get_per_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->per[sysclk_ind];
}

const struct dpll_params *get_iva_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->iva[sysclk_ind];
}

const struct dpll_params *get_usb_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->usb[sysclk_ind];
}

const struct dpll_params *get_abe_dpll_params(struct dplls const *dpll_data)
{
#ifdef CONFIG_SYS_OMAP_ABE_SYSCK
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->abe[sysclk_ind];
#else
	return dpll_data->abe;
#endif
}

static const struct dpll_params *get_ddr_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->ddr)
		return NULL;
	return &dpll_data->ddr[sysclk_ind];
}

#ifdef CONFIG_DRIVER_TI_CPSW
static const struct dpll_params *get_gmac_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->gmac)
		return NULL;
	return &dpll_data->gmac[sysclk_ind];
}
#endif
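/*
 * The locked DPLL output is Fdpll = 2 * M * Fref / (N + 1), as used in
 * omap_ddr_clk() below. As a worked example (illustrative parameter
 * values, not taken from any particular board file): with a 38.4 MHz
 * sys_clk, M = 125 and N = 5 give
 *
 *	Fdpll = 2 * 125 * 38.4 MHz / 6 = 1600 MHz
 *
 * which the M2/Hxy post-dividers then divide down to the leaf clocks.
 */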
static void do_setup_dpll(u32 const base, const struct dpll_params *params,
			  u8 lock, char *dpll)
{
	u32 temp, M, N;
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!params)
		return;

	temp = readl(&dpll_regs->cm_clksel_dpll);

	if (check_for_lock(base)) {
		/*
		 * The DPLL has already been locked by ROM code using the
		 * configuration header (CH). Check if M, N match the ideal
		 * nominal opp values. If they match, skip the relock;
		 * otherwise relock.
		 */
		M = (temp & CM_CLKSEL_DPLL_M_MASK) >> CM_CLKSEL_DPLL_M_SHIFT;
		N = (temp & CM_CLKSEL_DPLL_N_MASK) >> CM_CLKSEL_DPLL_N_SHIFT;
		if ((M != (params->m)) || (N != (params->n))) {
			debug("\n %s DPLL locked, but not with ideal M = %d, "
			      "N = %d values, current values are M = %d, "
			      "N = %d", dpll, params->m, params->n, M, N);
		} else {
			/* DPLL locked with ideal values for nominal opps. */
			debug("\n %s DPLL already locked with ideal "
			      "nominal opp values", dpll);

			bypass_dpll(base);
			goto setup_post_dividers;
		}
	}

	bypass_dpll(base);

	/* Set M & N */
	temp &= ~CM_CLKSEL_DPLL_M_MASK;
	temp |= (params->m << CM_CLKSEL_DPLL_M_SHIFT) & CM_CLKSEL_DPLL_M_MASK;

	temp &= ~CM_CLKSEL_DPLL_N_MASK;
	temp |= (params->n << CM_CLKSEL_DPLL_N_SHIFT) & CM_CLKSEL_DPLL_N_MASK;

	writel(temp, &dpll_regs->cm_clksel_dpll);

setup_post_dividers:
	setup_post_dividers(base, params);

	/* Lock */
	if (lock)
		do_lock_dpll(base);

	/* Wait till the DPLL locks */
	if (lock)
		wait_for_lock(base);
}

u32 omap_ddr_clk(void)
{
	u32 ddr_clk, sys_clk_khz, omap_rev, divider;
	const struct dpll_params *core_dpll_params;

	omap_rev = omap_revision();
	sys_clk_khz = get_sys_clk_freq() / 1000;

	core_dpll_params = get_core_dpll_params(*dplls_data);

	debug("sys_clk %d\n ", sys_clk_khz * 1000);

	/* Find Core DPLL locked frequency first */
	ddr_clk = sys_clk_khz * 2 * core_dpll_params->m /
		  (core_dpll_params->n + 1);

	if (omap_rev < OMAP5430_ES1_0) {
		/*
		 * DDR frequency is PHY_ROOT_CLK/2
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 4;
	} else {
		/*
		 * DDR frequency is PHY_ROOT_CLK
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 2;
	}

	ddr_clk = ddr_clk / divider / core_dpll_params->m2;
	ddr_clk *= 1000;	/* convert to Hz */
	debug("ddr_clk %d\n ", ddr_clk);

	return ddr_clk;
}

/*
 * Lock MPU dpll
 *
 * Resulting MPU frequencies:
 * 4430 ES1.0	: 600 MHz
 * 4430 ES2.x	: 792 MHz (OPP Turbo)
 * 4460		: 920 MHz (OPP Turbo) - DCC disabled
 */
void configure_mpu_dpll(void)
{
	const struct dpll_params *params;
	struct dpll_regs *mpu_dpll_regs;
	u32 omap_rev;
	omap_rev = omap_revision();

	/*
	 * DCC and clock divider settings for 4460.
	 * DCC is required only above a certain frequency:
	 * above 1 GHz on 4460, above 1.4 GHz on 5430.
	 */
	if ((omap_rev >= OMAP4460_ES1_0) && (omap_rev < OMAP5430_ES1_0)) {
		mpu_dpll_regs =
			(struct dpll_regs *)((*prcm)->cm_clkmode_dpll_mpu);
		bypass_dpll((*prcm)->cm_clkmode_dpll_mpu);
		clrbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK);
		setbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK);
		clrbits_le32(&mpu_dpll_regs->cm_clksel_dpll,
			     CM_CLKSEL_DCC_EN_MASK);
	}

	params = get_mpu_dpll_params(*dplls_data);

	do_setup_dpll((*prcm)->cm_clkmode_dpll_mpu, params, DPLL_LOCK, "mpu");
	debug("MPU DPLL locked\n");
}
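/*
 * Worked example for the DPLL_SD_DIV ceiling computation in
 * setup_usb_dpll() below (illustrative M/N values, not taken from any
 * particular board file): with sys_clk = 38.4 MHz, M = 25, N = 1:
 *
 *	num = 25 * 38400 = 960000
 *	den = (1 + 1) * 250 * 1000 = 500000
 *	sd_div = (960000 + 500000 - 1) / 500000 = 2	(= CEILING(1.92))
 */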
#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
static void setup_usb_dpll(void)
{
	const struct dpll_params *params;
	u32 sys_clk_khz, sd_div, num, den;

	sys_clk_khz = get_sys_clk_freq() / 1000;
	/*
	 * USB:
	 * USB dpll is J-type. Need to set DPLL_SD_DIV for jitter correction
	 * DPLL_SD_DIV = CEILING ([DPLL_MULT/(DPLL_DIV+1)] * CLKINP / 250)
	 *	- where CLKINP is sys_clk in MHz
	 * Use CLKINP in KHz and adjust the denominator accordingly so
	 * that we have enough accuracy and at the same time no overflow
	 */
	params = get_usb_dpll_params(*dplls_data);
	num = params->m * sys_clk_khz;
	den = (params->n + 1) * 250 * 1000;
	num += den - 1;
	sd_div = num / den;
	clrsetbits_le32((*prcm)->cm_clksel_dpll_usb,
			CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK,
			sd_div << CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT);

	/* Now setup the dpll with the regular function */
	do_setup_dpll((*prcm)->cm_clkmode_dpll_usb, params, DPLL_LOCK, "usb");
}
#endif

static void setup_dplls(void)
{
	u32 temp;
	const struct dpll_params *params;
	struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;

	debug("setup_dplls\n");

	/* CORE dpll */
	params = get_core_dpll_params(*dplls_data);	/* default - safest */
	/*
	 * Do not lock the core DPLL now. Just set it up.
	 * Core DPLL will be locked after setting up EMIF
	 * using the FREQ_UPDATE method (freq_update_core()).
	 */
	if (emif_sdram_type(readl(&emif->emif_sdram_config)) ==
	    EMIF_SDRAM_TYPE_LPDDR2)
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_NO_LOCK, "core");
	else
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_LOCK, "core");
	/* Set the ratios for CORE_CLK, L3_CLK, L4_CLK */
	temp = (CLKSEL_CORE_X2_DIV_1 << CLKSEL_CORE_SHIFT) |
	       (CLKSEL_L3_CORE_DIV_2 << CLKSEL_L3_SHIFT) |
	       (CLKSEL_L4_L3_DIV_2 << CLKSEL_L4_SHIFT);
	writel(temp, (*prcm)->cm_clksel_core);
	debug("Core DPLL configured\n");

	/* lock PER dpll */
	params = get_per_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_per,
		      params, DPLL_LOCK, "per");
	debug("PER DPLL locked\n");

	/* MPU dpll */
	configure_mpu_dpll();

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
	setup_usb_dpll();
#endif
	params = get_ddr_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_ddrphy,
		      params, DPLL_LOCK, "ddr");

#ifdef CONFIG_DRIVER_TI_CPSW
	params = get_gmac_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_gmac, params,
		      DPLL_LOCK, "gmac");
#endif
}

u32 get_offset_code(u32 volt_offset, struct pmic_data *pmic)
{
	u32 offset_code;

	volt_offset -= pmic->base_offset;

	/* Round up to the next step boundary (ceiling division) */
	offset_code = (volt_offset + pmic->step - 1) / pmic->step;

	/*
	 * Offset codes 1-6 all give the base voltage in Palmas
	 * Offset code 0 switches OFF the SMPS
	 */
	return offset_code + pmic->start_code;
}
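/*
 * Worked example for get_offset_code() (the parameter values here are
 * illustrative Palmas-style numbers, not authoritative): assuming
 * base_offset = 500000 uV, step = 10000 uV, start_code = 6, a request
 * for 1060 mV (1060000 uV) gives
 *
 *	(1060000 - 500000 + 10000 - 1) / 10000 = 56, + 6 = offset code 62
 */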
void do_scale_vcore(u32 vcore_reg, u32 volt_mv, struct pmic_data *pmic)
{
	u32 offset_code;
	u32 offset = volt_mv;
	int ret = 0;

	if (!volt_mv)
		return;

	pmic->pmic_bus_init();
	/* See if we can first get the GPIO if needed */
	if (pmic->gpio_en)
		ret = gpio_request(pmic->gpio, "PMIC_GPIO");

	if (ret < 0) {
		printf("%s: gpio %d request failed %d\n", __func__,
		       pmic->gpio, ret);
		return;
	}

	/* Pull the GPIO low to select SET0 register, while we program SET1 */
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 0);

	/* convert to uV for better accuracy in the calculations */
	offset *= 1000;

	offset_code = get_offset_code(offset, pmic);

	debug("do_scale_vcore: volt - %d offset_code - 0x%x\n", volt_mv,
	      offset_code);

	if (pmic->pmic_write(pmic->i2c_slave_addr, vcore_reg, offset_code))
		printf("Scaling voltage failed for 0x%x\n", vcore_reg);
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 1);
}

int __weak get_voltrail_opp(int rail_offset)
{
	/*
	 * By default return OPP_NOM for all voltage rails.
	 */
	return OPP_NOM;
}

static u32 optimize_vcore_voltage(struct volts const *v, int opp)
{
	u32 val;

	if (!v->value[opp])
		return 0;
	if (!v->efuse.reg[opp])
		return v->value[opp];

	switch (v->efuse.reg_bits) {
	case 16:
		val = readw(v->efuse.reg[opp]);
		break;
	case 32:
		val = readl(v->efuse.reg[opp]);
		break;
	default:
		printf("Error: efuse 0x%08x bits=%d unknown\n",
		       v->efuse.reg[opp], v->efuse.reg_bits);
		return v->value[opp];
	}

	if (!val) {
		printf("Error: efuse 0x%08x bits=%d val=0, using %d\n",
		       v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp]);
		return v->value[opp];
	}

	debug("%s:efuse 0x%08x bits=%d Vnom=%d, using efuse value %d\n",
	      __func__, v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp],
	      val);
	return val;
}

#ifdef CONFIG_IODELAY_RECALIBRATION
void __weak recalibrate_iodelay(void)
{
}
#endif
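/*
 * Illustrative behaviour of optimize_vcore_voltage() above (example
 * numbers only): with a nominal value[opp] of 1060 mV and a 16-bit
 * efuse register that reads back 1025, the fused 1025 mV is used; if
 * the efuse register is absent or reads zero, the nominal 1060 mV is
 * kept.
 */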
/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore the code is
 * compiled conditionally. Note that the new code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
	int i, opp, j, ol;
	struct volts *pv = (struct volts *)vcores;
	struct volts *px;

	for (i = 0; i < (sizeof(struct vcores_data) / sizeof(struct volts));
	     i++) {
		opp = get_voltrail_opp(i);
		debug("%d -> ", pv->value[opp]);

		if (pv->value[opp]) {
			/* Handle non-empty members only */
			pv->value[opp] = optimize_vcore_voltage(pv, opp);
			px = (struct volts *)vcores;
			j = 0;
			while (px < pv) {
				/*
				 * Scan already handled non-empty members to
				 * see if we have a group and find the max
				 * voltage, which is set on the first
				 * occurrence of the particular SMPS; the
				 * other group voltages are zeroed.
				 */
				ol = get_voltrail_opp(j);
				if (px->value[ol] &&
				    (pv->pmic->i2c_slave_addr ==
				     px->pmic->i2c_slave_addr) &&
				    (pv->addr == px->addr)) {
					/* Same PMIC, same SMPS */
					if (pv->value[opp] > px->value[ol])
						px->value[ol] = pv->value[opp];

					pv->value[opp] = 0;
				}
				px++;
				j++;
			}
		}
		debug("%d\n", pv->value[opp]);
		pv++;
	}

	opp = get_voltrail_opp(VOLT_CORE);
	debug("cor: %d\n", vcores->core.value[opp]);
	do_scale_vcore(vcores->core.addr, vcores->core.value[opp],
		       vcores->core.pmic);
	/*
	 * IO delay recalibration should be done immediately after
	 * adjusting AVS voltages for VDD_CORE_L.
	 * Respective boards should call __recalibrate_iodelay()
	 * with proper mux, virtual and manual mode configurations.
	 */
#ifdef CONFIG_IODELAY_RECALIBRATION
	recalibrate_iodelay();
#endif

	opp = get_voltrail_opp(VOLT_MPU);
	debug("mpu: %d\n", vcores->mpu.value[opp]);
	do_scale_vcore(vcores->mpu.addr, vcores->mpu.value[opp],
		       vcores->mpu.pmic);
	/* Configure MPU ABB LDO after scale */
	abb_setup(vcores->mpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  vcores->mpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_MM);
	debug("mm: %d\n", vcores->mm.value[opp]);
	do_scale_vcore(vcores->mm.addr, vcores->mm.value[opp],
		       vcores->mm.pmic);
	/* Configure MM ABB LDO after scale */
	abb_setup(vcores->mm.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
		  (*prcm)->prm_abbldo_mm_setup,
		  (*prcm)->prm_abbldo_mm_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->mm.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_GPU);
	debug("gpu: %d\n", vcores->gpu.value[opp]);
	do_scale_vcore(vcores->gpu.addr, vcores->gpu.value[opp],
		       vcores->gpu.pmic);
	/* Configure GPU ABB LDO after scale */
	abb_setup(vcores->gpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_gpu_setup,
		  (*prcm)->prm_abbldo_gpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->gpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_EVE);
	debug("eve: %d\n", vcores->eve.value[opp]);
	do_scale_vcore(vcores->eve.addr, vcores->eve.value[opp],
		       vcores->eve.pmic);
	/* Configure EVE ABB LDO after scale */
	abb_setup(vcores->eve.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
		  (*prcm)->prm_abbldo_eve_setup,
		  (*prcm)->prm_abbldo_eve_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->eve.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_IVA);
	debug("iva: %d\n", vcores->iva.value[opp]);
	do_scale_vcore(vcores->iva.addr, vcores->iva.value[opp],
		       vcores->iva.pmic);
	/* Configure IVA ABB LDO after scale */
	abb_setup(vcores->iva.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
		  (*prcm)->prm_abbldo_iva_setup,
		  (*prcm)->prm_abbldo_iva_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->iva.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);
}
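/*
 * Illustrative rail-grouping example for scale_vcores() above (example
 * numbers only): if the core and gpu rails sit on the same PMIC and the
 * same SMPS register, with core scanned first at 1060 mV and gpu at
 * 1090 mV, the scan raises the core entry to the group maximum 1090 mV
 * and zeroes the gpu entry, so only one do_scale_vcore() write reaches
 * that SMPS.
 */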
static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			enable_mode << CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Enable clock domain - %x\n", clkctrl_reg);
}

static inline void disable_clock_domain(u32 const clkctrl_reg)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_SLEEP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Disable clock domain - %x\n", clkctrl_reg);
}

static inline void wait_for_clk_enable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_DISABLED;
	u32 bound = LDELAY;

	while ((idlest == MODULE_CLKCTRL_IDLEST_DISABLED) ||
	       (idlest == MODULE_CLKCTRL_IDLEST_TRANSITIONING)) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock enable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void enable_clock_module(u32 const clkctrl_addr, u32 enable_mode,
					u32 wait_for_enable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			enable_mode << MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Enable clock module - %x\n", clkctrl_addr);
	if (wait_for_enable)
		wait_for_clk_enable(clkctrl_addr);
}

static inline void wait_for_clk_disable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_FULLY_FUNCTIONAL;
	u32 bound = LDELAY;

	while (idlest != MODULE_CLKCTRL_IDLEST_DISABLED) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock disable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void disable_clock_module(u32 const clkctrl_addr,
					u32 wait_for_disable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_DISABLE <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Disable clock module - %x\n", clkctrl_addr);
	if (wait_for_disable)
		wait_for_clk_disable(clkctrl_addr);
}
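/*
 * freq_update_core() locks the CORE DPLL that setup_dplls() left in
 * DPLL_NO_LOCK for LPDDR2, using the SHADOW_FREQ_CONFIG1 FREQ_UPDATE
 * handshake so that the EMIF configuration and the DPLL frequency are
 * switched together. The EMIF clock domain must be awake during the
 * update, hence the SW_WKUP/HW_AUTO transitions around it.
 */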
void freq_update_core(void)
{
	u32 freq_config1 = 0;
	const struct dpll_params *core_dpll_params;
	u32 omap_rev = omap_revision();

	core_dpll_params = get_core_dpll_params(*dplls_data);
	/* Put EMIF clock domain in sw wakeup mode */
	enable_clock_domain((*prcm)->cm_memif_clkstctrl,
			    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
	wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);

	freq_config1 = SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK |
		       SHADOW_FREQ_CONFIG1_DLL_RESET_MASK;

	freq_config1 |= (DPLL_EN_LOCK << SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT) &
			SHADOW_FREQ_CONFIG1_DPLL_EN_MASK;

	freq_config1 |= (core_dpll_params->m2 <<
			 SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT) &
			SHADOW_FREQ_CONFIG1_M2_DIV_MASK;

	writel(freq_config1, (*prcm)->cm_shadow_freq_config1);
	if (!wait_on_value(SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK, 0,
			   (u32 *)(*prcm)->cm_shadow_freq_config1, LDELAY)) {
		puts("FREQ UPDATE procedure failed!!\n");
		hang();
	}

	/*
	 * Putting EMIF in HW_AUTO is seen to be causing issues with
	 * EMIF clocks and the master DLL. Keep EMIF in SW_WKUP
	 * in OMAP5430 ES1.0 silicon
	 */
	if (omap_rev != OMAP5430_ES1_0) {
		/* Put EMIF clock domain back in hw auto mode */
		enable_clock_domain((*prcm)->cm_memif_clkstctrl,
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
		wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
		wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);
	}
}

void bypass_dpll(u32 const base)
{
	do_bypass_dpll(base);
	wait_for_bypass(base);
}

void lock_dpll(u32 const base)
{
	do_lock_dpll(base);
	wait_for_lock(base);
}

static void setup_clocks_for_console(void)
{
	/* Do not add any spl_debug prints in this function */
	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_WKUP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);

	/* Enable all UARTs - console will be on one of them */
	clrsetbits_le32((*prcm)->cm_l4per_uart1_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart2_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart3_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart4_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_HW_AUTO <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
}

void do_enable_clocks(u32 const *clk_domains,
		      u32 const *clk_modules_hw_auto,
		      u32 const *clk_modules_explicit_en,
		      u8 wait_for_enable)
{
	u32 i, max = 100;

	/* Put the clock domains in SW_WKUP mode */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	}

	/* Clock modules that need to be put in HW_AUTO */
	for (i = 0; (i < max) && clk_modules_hw_auto &&
	     clk_modules_hw_auto[i]; i++) {
		enable_clock_module(clk_modules_hw_auto[i],
				    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
				    wait_for_enable);
	}

	/* Clock modules that need to be put in SW_EXPLICIT_EN mode */
	for (i = 0; (i < max) && clk_modules_explicit_en &&
	     clk_modules_explicit_en[i]; i++) {
		enable_clock_module(clk_modules_explicit_en[i],
				    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
				    wait_for_enable);
	}

	/* Put the clock domains in HW_AUTO mode now */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
	}
}

void do_disable_clocks(u32 const *clk_domains,
		       u32 const *clk_modules_disable,
		       u8 wait_for_disable)
{
	u32 i, max = 100;

	/* Clock modules that need to be put in SW_DISABLE */
	for (i = 0; (i < max) && clk_modules_disable[i]; i++)
		disable_clock_module(clk_modules_disable[i],
				     wait_for_disable);

	/* Put the clock domains in SW_SLEEP mode */
	for (i = 0; (i < max) && clk_domains[i]; i++)
		disable_clock_domain(clk_domains[i]);
}
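/*
 * do_enable_clocks() expects zero-terminated arrays of CM_*_CLKCTRL
 * register addresses (the loops above stop at the first 0 entry or
 * after 100 entries; a NULL array pointer skips that pass entirely).
 * A minimal usage sketch with hypothetical array names:
 *
 *	u32 const clk_domains_example[] = {
 *		(*prcm)->cm_l4per_clkstctrl,
 *		0
 *	};
 *	u32 const clk_modules_explicit_en_example[] = {
 *		(*prcm)->cm_l4per_uart3_clkctrl,
 *		0
 *	};
 *	do_enable_clocks(clk_domains_example, NULL,
 *			 clk_modules_explicit_en_example, 1);
 */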
/**
 * setup_early_clocks() - Setup early clocks needed for SoC
 *
 * Setup clocks for console, SPL basic initialization clocks and initialize
 * the timer. This is invoked prior to prcm_init().
 */
void setup_early_clocks(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		setup_clocks_for_console();
		enable_basic_clocks();
		timer_init();
		/* Fall through */
	}
}

void prcm_init(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		scale_vcores(*omap_vcores);
		setup_dplls();
		setup_warmreset_time();
		break;
	default:
		break;
	}

	if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context())
		enable_basic_uboot_clocks();
}

void gpi2c_init(void)
{
	static int gpi2c = 1;

	if (gpi2c) {
		i2c_init(CONFIG_SYS_OMAP24_I2C_SPEED,
			 CONFIG_SYS_OMAP24_I2C_SLAVE);
		gpi2c = 0;
	}
}