/*
 * clkgen-mux.c: ST GEN-MUX Clock driver
 *
 * Copyright (C) 2014 STMicroelectronics (R&D) Limited
 *
 * Authors: Stephen Gallimore <stephen.gallimore@st.com>
 *	    Pankaj Dev <pankaj.dev@st.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>

static DEFINE_SPINLOCK(clkgena_divmux_lock);
static DEFINE_SPINLOCK(clkgenf_lock);

static const char ** __init clkgen_mux_get_parents(struct device_node *np,
						   int *num_parents)
{
	const char **parents;
	int nparents;

	nparents = of_clk_get_parent_count(np);
	if (WARN_ON(nparents <= 0))
		return ERR_PTR(-EINVAL);

	parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
	if (!parents)
		return ERR_PTR(-ENOMEM);

	*num_parents = of_clk_parent_fill(np, parents, nparents);
	return parents;
}

/**
 * DOC: Clock mux with a programmable divider on each of its three inputs.
 *      The mux has an input setting which effectively gates its output.
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control gating
 * rate - set rate is supported
 * parent - set/get parent
 */

#define NUM_INPUTS 3

struct clkgena_divmux {
	struct clk_hw hw;
	/* Subclassed mux and divider structures */
	struct clk_mux mux;
	struct clk_divider div[NUM_INPUTS];
	/* Enable/running feedback register bits for each input */
	void __iomem *feedback_reg[NUM_INPUTS];
	int feedback_bit_idx;

	u8 muxsel;
};

#define to_clkgena_divmux(_hw) container_of(_hw, struct clkgena_divmux, hw)

struct clkgena_divmux_data {
	int num_outputs;
	int mux_offset;
	int mux_offset2;
	int mux_start_bit;
	int div_offsets[NUM_INPUTS];
	int fb_offsets[NUM_INPUTS];
	int fb_start_bit_idx;
};

#define CKGAX_CLKOPSRC_SWITCH_OFF 0x3

static int clkgena_divmux_is_running(struct clkgena_divmux *mux)
{
	u32 regval = readl(mux->feedback_reg[mux->muxsel]);
	u32 running = regval & BIT(mux->feedback_bit_idx);

	return !!running;
}

static int clkgena_divmux_enable(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;
	unsigned long timeout;
	int ret = 0;

	__clk_hw_set_clk(mux_hw, hw);

	ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
	if (ret)
		return ret;

	timeout = jiffies + msecs_to_jiffies(10);

	while (!clkgena_divmux_is_running(genamux)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}

static void clkgena_divmux_disable(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
}

static int clkgena_divmux_is_enabled(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
}

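/*
 * Read back the currently selected input from the hardware mux and cache it
 * in muxsel, so the rate operations below can address the divider that sits
 * on the active input. If the basic mux op reports an invalid selection
 * (for example the hardware is set to the "switch off" value), fall back to
 * input 0.
 */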
static u8 clkgena_divmux_get_parent(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
	if ((s8)genamux->muxsel < 0) {
		pr_debug("%s: %s: Invalid parent, setting to default.\n",
			 __func__, clk_hw_get_name(hw));
		genamux->muxsel = 0;
	}

	return genamux->muxsel;
}

static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);

	if (index >= CKGAX_CLKOPSRC_SWITCH_OFF)
		return -EINVAL;

	genamux->muxsel = index;

	/*
	 * If the mux is already enabled, call enable directly to set the
	 * new mux position and wait for it to start running again. Otherwise
	 * do nothing.
	 */
	if (clkgena_divmux_is_enabled(hw))
		clkgena_divmux_enable(hw);

	return 0;
}

static unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;

	__clk_hw_set_clk(div_hw, hw);

	return clk_divider_ops.recalc_rate(div_hw, parent_rate);
}

static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;

	__clk_hw_set_clk(div_hw, hw);

	return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
}

static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
				      unsigned long *prate)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;

	__clk_hw_set_clk(div_hw, hw);

	return clk_divider_ops.round_rate(div_hw, rate, prate);
}

static const struct clk_ops clkgena_divmux_ops = {
	.enable = clkgena_divmux_enable,
	.disable = clkgena_divmux_disable,
	.is_enabled = clkgena_divmux_is_enabled,
	.get_parent = clkgena_divmux_get_parent,
	.set_parent = clkgena_divmux_set_parent,
	.round_rate = clkgena_divmux_round_rate,
	.recalc_rate = clkgena_divmux_recalc_rate,
	.set_rate = clkgena_divmux_set_rate,
};

/**
 * clk_register_genamux - register a genamux clock with the clock framework
 * @name: name of the output clock
 * @parent_names: names of the parent clocks feeding the mux inputs
 * @num_parents: number of entries in @parent_names
 * @reg: base address of the ClockgenA register block
 * @muxdata: register layout data for this ClockgenA variant
 * @idx: output index of this clock within the clockgen
 */
static struct clk * __init clk_register_genamux(const char *name,
				const char **parent_names, u8 num_parents,
				void __iomem *reg,
				const struct clkgena_divmux_data *muxdata,
				u32 idx)
{
	/*
	 * Fixed constants across all ClockgenA variants
	 */
	const int mux_width = 2;
	const int divider_width = 5;
	struct clkgena_divmux *genamux;
	struct clk *clk;
	struct clk_init_data init;
	int i;

	genamux = kzalloc(sizeof(*genamux), GFP_KERNEL);
	if (!genamux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clkgena_divmux_ops;
	init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	genamux->mux.lock = &clkgena_divmux_lock;
	genamux->mux.mask = BIT(mux_width) - 1;
	genamux->mux.shift = muxdata->mux_start_bit + (idx * mux_width);
	if (genamux->mux.shift > 31) {
		/*
		 * We have spilled into the second mux register so
		 * adjust the register address and the bit shift accordingly.
		 */
		genamux->mux.reg = reg + muxdata->mux_offset2;
		genamux->mux.shift -= 32;
	} else {
		genamux->mux.reg = reg + muxdata->mux_offset;
	}

	for (i = 0; i < NUM_INPUTS; i++) {
		/*
		 * Divider config for each input
		 */
		void __iomem *divbase = reg + muxdata->div_offsets[i];

		genamux->div[i].width = divider_width;
		genamux->div[i].reg = divbase + (idx * sizeof(u32));

		/*
		 * Mux enabled/running feedback register for each input.
		 */
		genamux->feedback_reg[i] = reg + muxdata->fb_offsets[i];
	}

	genamux->feedback_bit_idx = muxdata->fb_start_bit_idx + idx;
	genamux->hw.init = &init;

	clk = clk_register(NULL, &genamux->hw);
	if (IS_ERR(clk)) {
		kfree(genamux);
		goto err;
	}

	pr_debug("%s: parent %s rate %lu\n",
		 __clk_get_name(clk),
		 __clk_get_name(clk_get_parent(clk)),
		 clk_get_rate(clk));
err:
	return clk;
}

static struct clkgena_divmux_data st_divmux_c65hs = {
	.num_outputs = 4,
	.mux_offset = 0x14,
	.mux_start_bit = 0,
	.div_offsets = { 0x800, 0x900, 0xb00 },
	.fb_offsets = { 0x18, 0x1c, 0x20 },
	.fb_start_bit_idx = 0,
};

static struct clkgena_divmux_data st_divmux_c65ls = {
	.num_outputs = 14,
	.mux_offset = 0x14,
	.mux_offset2 = 0x24,
	.mux_start_bit = 8,
	.div_offsets = { 0x810, 0xa10, 0xb10 },
	.fb_offsets = { 0x18, 0x1c, 0x20 },
	.fb_start_bit_idx = 4,
};

static struct clkgena_divmux_data st_divmux_c32odf0 = {
	.num_outputs = 8,
	.mux_offset = 0x1c,
	.mux_start_bit = 0,
	.div_offsets = { 0x800, 0x900, 0xa60 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 0,
};

static struct clkgena_divmux_data st_divmux_c32odf1 = {
	.num_outputs = 8,
	.mux_offset = 0x1c,
	.mux_start_bit = 16,
	.div_offsets = { 0x820, 0x980, 0xa80 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 8,
};

static struct clkgena_divmux_data st_divmux_c32odf2 = {
	.num_outputs = 8,
	.mux_offset = 0x20,
	.mux_start_bit = 0,
	.div_offsets = { 0x840, 0xa20, 0xb10 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 16,
};

static struct clkgena_divmux_data st_divmux_c32odf3 = {
	.num_outputs = 8,
	.mux_offset = 0x20,
	.mux_start_bit = 16,
	.div_offsets = { 0x860, 0xa40, 0xb30 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 24,
};

static const struct of_device_id clkgena_divmux_of_match[] = {
	{
		.compatible = "st,clkgena-divmux-c65-hs",
		.data = &st_divmux_c65hs,
	},
	{
		.compatible = "st,clkgena-divmux-c65-ls",
		.data = &st_divmux_c65ls,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf0",
		.data = &st_divmux_c32odf0,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf1",
		.data = &st_divmux_c32odf1,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf2",
		.data = &st_divmux_c32odf2,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf3",
		.data = &st_divmux_c32odf3,
	},
	{}
};

static void __iomem * __init clkgen_get_register_base(struct device_node *np)
{
	struct device_node *pnode;
	void __iomem *reg;

	pnode = of_get_parent(np);
	if (!pnode)
		return NULL;

	reg = of_iomap(pnode, 0);

	of_node_put(pnode);
	return reg;
}

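/*
 * Register one divmux clock per entry in "clock-output-names". The register
 * base comes from the parent clockgen node (see clkgen_get_register_base()),
 * and the resulting clocks are exposed through a one-cell OF provider indexed
 * by output number.
 */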
static void __init st_of_clkgena_divmux_setup(struct device_node *np)
{
	const struct of_device_id *match;
	const struct clkgena_divmux_data *data;
	struct clk_onecell_data *clk_data;
	void __iomem *reg;
	const char **parents;
	int num_parents = 0, i;

	match = of_match_node(clkgena_divmux_of_match, np);
	if (WARN_ON(!match))
		return;

	data = match->data;

	reg = clkgen_get_register_base(np);
	if (!reg)
		return;

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents))
		goto err_parents;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err_alloc;

	clk_data->clk_num = data->num_outputs;
	clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks)
		goto err_alloc_clks;

	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk *clk;
		const char *clk_name;

		if (of_property_read_string_index(np, "clock-output-names",
						  i, &clk_name))
			break;

		/*
		 * If we read an empty clock name then the output is unused
		 */
		if (*clk_name == '\0')
			continue;

		clk = clk_register_genamux(clk_name, parents, num_parents,
					   reg, data, i);

		if (IS_ERR(clk))
			goto err;

		clk_data->clks[i] = clk;
	}

	kfree(parents);

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
	return;
err:
	kfree(clk_data->clks);
err_alloc_clks:
	kfree(clk_data);
err_alloc:
	kfree(parents);
err_parents:
	iounmap(reg);
}
CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup);

struct clkgena_prediv_data {
	u32 offset;
	u8 shift;
	struct clk_div_table *table;
};

static struct clk_div_table prediv_table16[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 16 },
	{ .div = 0 },
};

static struct clkgena_prediv_data prediv_c65_data = {
	.offset = 0x4c,
	.shift = 31,
	.table = prediv_table16,
};

static struct clkgena_prediv_data prediv_c32_data = {
	.offset = 0x50,
	.shift = 1,
	.table = prediv_table16,
};

static const struct of_device_id clkgena_prediv_of_match[] = {
	{ .compatible = "st,clkgena-prediv-c65", .data = &prediv_c65_data },
	{ .compatible = "st,clkgena-prediv-c32", .data = &prediv_c32_data },
	{}
};

static void __init st_of_clkgena_prediv_setup(struct device_node *np)
{
	const struct of_device_id *match;
	void __iomem *reg;
	const char *parent_name, *clk_name;
	struct clk *clk;
	const struct clkgena_prediv_data *data;

	match = of_match_node(clkgena_prediv_of_match, np);
	if (!match) {
		pr_err("%s: No matching data\n", __func__);
		return;
	}

	data = match->data;

	reg = clkgen_get_register_base(np);
	if (!reg)
		return;

	parent_name = of_clk_get_parent_name(np, 0);
	if (!parent_name)
		goto err;

	if (of_property_read_string_index(np, "clock-output-names",
					  0, &clk_name))
		goto err;

	clk = clk_register_divider_table(NULL, clk_name, parent_name,
					 CLK_GET_RATE_NOCACHE,
					 reg + data->offset, data->shift, 1,
					 0, data->table, NULL);
	if (IS_ERR(clk))
		goto err;

	of_clk_add_provider(np, of_clk_src_simple_get, clk);
	pr_debug("%s: parent %s rate %u\n",
		 __clk_get_name(clk),
		 __clk_get_name(clk_get_parent(clk)),
		 (unsigned int)clk_get_rate(clk));

	return;
err:
	iounmap(reg);
}
CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup);

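/*
 * Simple mux clocks. Each compatible string in mux_of_match below carries a
 * clkgen_mux_data entry describing the mux register offset, field shift and
 * width, plus optional spinlock and clock/mux flags.
 * st_of_clkgen_mux_setup() registers a basic mux clock named after the
 * device node, always with CLK_SET_RATE_PARENT set.
 */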
struct clkgen_mux_data {
	u32 offset;
	u8 shift;
	u8 width;
	spinlock_t *lock;
	unsigned long clk_flags;
	u8 mux_flags;
};

static struct clkgen_mux_data clkgen_mux_c_vcc_hd_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_fvdp_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_hva_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_hd_416 = {
	.offset = 0,
	.shift = 16,
	.width = 1,
	.lock = &clkgenf_lock,
};

static struct clkgen_mux_data clkgen_mux_c_vcc_sd_416 = {
	.offset = 0,
	.shift = 17,
	.width = 1,
	.lock = &clkgenf_lock,
};

static struct clkgen_mux_data stih415_a9_mux_data = {
	.offset = 0,
	.shift = 1,
	.width = 2,
};

static struct clkgen_mux_data stih416_a9_mux_data = {
	.offset = 0,
	.shift = 0,
	.width = 2,
};

static struct clkgen_mux_data stih407_a9_mux_data = {
	.offset = 0x1a4,
	.shift = 0,
	.width = 2,
};

static const struct of_device_id mux_of_match[] = {
	{
		.compatible = "st,stih416-clkgenc-vcc-hd",
		.data = &clkgen_mux_c_vcc_hd_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-fvdp",
		.data = &clkgen_mux_f_vcc_fvdp_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-hva",
		.data = &clkgen_mux_f_vcc_hva_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-hd",
		.data = &clkgen_mux_f_vcc_hd_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-sd",
		.data = &clkgen_mux_c_vcc_sd_416,
	},
	{
		.compatible = "st,stih415-clkgen-a9-mux",
		.data = &stih415_a9_mux_data,
	},
	{
		.compatible = "st,stih416-clkgen-a9-mux",
		.data = &stih416_a9_mux_data,
	},
	{
		.compatible = "st,stih407-clkgen-a9-mux",
		.data = &stih407_a9_mux_data,
	},
	{}
};

static void __init st_of_clkgen_mux_setup(struct device_node *np)
{
	const struct of_device_id *match;
	struct clk *clk;
	void __iomem *reg;
	const char **parents;
	int num_parents;
	const struct clkgen_mux_data *data;

	match = of_match_node(mux_of_match, np);
	if (!match) {
		pr_err("%s: No matching data\n", __func__);
		return;
	}

	data = match->data;

	reg = of_iomap(np, 0);
	if (!reg) {
		pr_err("%s: Failed to get base address\n", __func__);
		return;
	}

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents)) {
		pr_err("%s: Failed to get parents (%ld)\n",
		       __func__, PTR_ERR(parents));
		goto err_parents;
	}

	clk = clk_register_mux(NULL, np->name, parents, num_parents,
			       data->clk_flags | CLK_SET_RATE_PARENT,
			       reg + data->offset,
			       data->shift, data->width, data->mux_flags,
			       data->lock);
	if (IS_ERR(clk))
		goto err;

	pr_debug("%s: parent %s rate %u\n",
		 __clk_get_name(clk),
		 __clk_get_name(clk_get_parent(clk)),
		 (unsigned int)clk_get_rate(clk));

	kfree(parents);
	of_clk_add_provider(np, of_clk_src_simple_get, clk);
	return;

err:
	kfree(parents);
err_parents:
	iounmap(reg);
}
CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup);

#define VCC_MAX_CHANNELS 16

#define VCC_GATE_OFFSET 0x0
#define VCC_MUX_OFFSET 0x4
#define VCC_DIV_OFFSET 0x8

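/*
 * Each VCC output channel is built as a composite clock from three basic
 * clocks sharing the register block above: a per-channel gate bit at
 * VCC_GATE_OFFSET (set-to-disable), a 2-bit parent select field at
 * VCC_MUX_OFFSET and a 2-bit power-of-two divider field at VCC_DIV_OFFSET.
 */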
struct clkgen_vcc_data {
	spinlock_t *lock;
	unsigned long clk_flags;
};

static struct clkgen_vcc_data st_clkgenc_vcc_416 = {
	.clk_flags = CLK_SET_RATE_PARENT,
};

static struct clkgen_vcc_data st_clkgenf_vcc_416 = {
	.lock = &clkgenf_lock,
};

static const struct of_device_id vcc_of_match[] = {
	{ .compatible = "st,stih416-clkgenc", .data = &st_clkgenc_vcc_416 },
	{ .compatible = "st,stih416-clkgenf", .data = &st_clkgenf_vcc_416 },
	{}
};

static void __init st_of_clkgen_vcc_setup(struct device_node *np)
{
	const struct of_device_id *match;
	void __iomem *reg;
	const char **parents;
	int num_parents, i;
	struct clk_onecell_data *clk_data;
	const struct clkgen_vcc_data *data;

	match = of_match_node(vcc_of_match, np);
	if (WARN_ON(!match))
		return;
	data = match->data;

	reg = of_iomap(np, 0);
	if (!reg)
		return;

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents))
		goto err_parents;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err_alloc;

	clk_data->clk_num = VCC_MAX_CHANNELS;
	clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks)
		goto err_alloc_clks;

	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk *clk;
		const char *clk_name;
		struct clk_gate *gate;
		struct clk_divider *div;
		struct clk_mux *mux;

		if (of_property_read_string_index(np, "clock-output-names",
						  i, &clk_name))
			break;

		/*
		 * If we read an empty clock name then the output is unused
		 */
		if (*clk_name == '\0')
			continue;

		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err;

		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			kfree(gate);
			goto err;
		}

		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux) {
			kfree(gate);
			kfree(div);
			goto err;
		}

		gate->reg = reg + VCC_GATE_OFFSET;
		gate->bit_idx = i;
		gate->flags = CLK_GATE_SET_TO_DISABLE;
		gate->lock = data->lock;

		div->reg = reg + VCC_DIV_OFFSET;
		div->shift = 2 * i;
		div->width = 2;
		div->flags = CLK_DIVIDER_POWER_OF_TWO |
			     CLK_DIVIDER_ROUND_CLOSEST;

		mux->reg = reg + VCC_MUX_OFFSET;
		mux->shift = 2 * i;
		mux->mask = 0x3;

		clk = clk_register_composite(NULL, clk_name, parents,
					     num_parents,
					     &mux->hw, &clk_mux_ops,
					     &div->hw, &clk_divider_ops,
					     &gate->hw, &clk_gate_ops,
					     data->clk_flags |
					     CLK_GET_RATE_NOCACHE);
		if (IS_ERR(clk)) {
			kfree(gate);
			kfree(div);
			kfree(mux);
			goto err;
		}

		pr_debug("%s: parent %s rate %u\n",
			 __clk_get_name(clk),
			 __clk_get_name(clk_get_parent(clk)),
			 (unsigned int)clk_get_rate(clk));

		clk_data->clks[i] = clk;
	}

	kfree(parents);

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
	return;

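	/*
	 * On failure, free the mux/divider/gate sub-structures of any
	 * composite clocks that were already registered: recover them from
	 * the composite's clk_hw pointers with container_of() before freeing.
	 */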
err:
	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk_composite *composite;

		if (!clk_data->clks[i])
			continue;

		composite = container_of(__clk_get_hw(clk_data->clks[i]),
					 struct clk_composite, hw);
		kfree(container_of(composite->gate_hw, struct clk_gate, hw));
		kfree(container_of(composite->rate_hw, struct clk_divider, hw));
		kfree(container_of(composite->mux_hw, struct clk_mux, hw));
	}

	kfree(clk_data->clks);
err_alloc_clks:
	kfree(clk_data);
err_alloc:
	kfree(parents);
err_parents:
	iounmap(reg);
}
CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup);