/*
 * clkgen-mux.c: ST GEN-MUX Clock driver
 *
 * Copyright (C) 2014 STMicroelectronics (R&D) Limited
 *
 * Authors: Stephen Gallimore <stephen.gallimore@st.com>
 *	    Pankaj Dev <pankaj.dev@st.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/clk-provider.h>

static DEFINE_SPINLOCK(clkgena_divmux_lock);
static DEFINE_SPINLOCK(clkgenf_lock);

static const char ** __init clkgen_mux_get_parents(struct device_node *np,
						   int *num_parents)
{
	const char **parents;
	int nparents, i;

	nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
	if (WARN_ON(nparents <= 0))
		return ERR_PTR(-EINVAL);

	parents = kzalloc(nparents * sizeof(const char *), GFP_KERNEL);
	if (!parents)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nparents; i++)
		parents[i] = of_clk_get_parent_name(np, i);

	*num_parents = nparents;
	return parents;
}

/**
 * DOC: Clock mux with a programmable divider on each of its three inputs.
 *	The mux has an input setting which effectively gates its output.
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control gating
 * rate - set rate is supported
 * parent - set/get parent
 */

#define NUM_INPUTS 3

struct clkgena_divmux {
	struct clk_hw hw;
	/* Subclassed mux and divider structures */
	struct clk_mux mux;
	struct clk_divider div[NUM_INPUTS];
	/* Enable/running feedback register bits for each input */
	void __iomem *feedback_reg[NUM_INPUTS];
	int feedback_bit_idx;

	u8 muxsel;
};

#define to_clkgena_divmux(_hw) container_of(_hw, struct clkgena_divmux, hw)

struct clkgena_divmux_data {
	int num_outputs;
	int mux_offset;
	int mux_offset2;
	int mux_start_bit;
	int div_offsets[NUM_INPUTS];
	int fb_offsets[NUM_INPUTS];
	int fb_start_bit_idx;
};

#define CKGAX_CLKOPSRC_SWITCH_OFF 0x3

static int clkgena_divmux_is_running(struct clkgena_divmux *mux)
{
	u32 regval = readl(mux->feedback_reg[mux->muxsel]);
	u32 running = regval & BIT(mux->feedback_bit_idx);
	return !!running;
}

static int clkgena_divmux_enable(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;
	unsigned long timeout;
	int ret = 0;

	__clk_hw_set_clk(mux_hw, hw);

	ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
	if (ret)
		return ret;

	timeout = jiffies + msecs_to_jiffies(10);

	while (!clkgena_divmux_is_running(genamux)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}

static void clkgena_divmux_disable(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
}

static int clkgena_divmux_is_enabled(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
}
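
/*
 * Read back the hardware mux selection and cache it so the rate operations
 * below index the divider belonging to the active input. An out-of-range
 * readback (e.g. the mux parked on the "switch off" selection) is treated
 * as having no valid parent, and input 0 is reported instead.
 */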
u8 clkgena_divmux_get_parent(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
	if ((s8)genamux->muxsel < 0) {
		pr_debug("%s: %s: Invalid parent, setting to default.\n",
			 __func__, __clk_get_name(hw->clk));
		genamux->muxsel = 0;
	}

	return genamux->muxsel;
}

static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);

	if (index >= CKGAX_CLKOPSRC_SWITCH_OFF)
		return -EINVAL;

	genamux->muxsel = index;

	/*
	 * If the mux is already enabled, call enable directly to set the
	 * new mux position and wait for it to start running again. Otherwise
	 * do nothing.
	 */
	if (clkgena_divmux_is_enabled(hw))
		clkgena_divmux_enable(hw);

	return 0;
}

unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;

	__clk_hw_set_clk(div_hw, hw);

	return clk_divider_ops.recalc_rate(div_hw, parent_rate);
}

static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;

	__clk_hw_set_clk(div_hw, hw);

	return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
}

static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
				      unsigned long *prate)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;

	__clk_hw_set_clk(div_hw, hw);

	return clk_divider_ops.round_rate(div_hw, rate, prate);
}

static const struct clk_ops clkgena_divmux_ops = {
	.enable = clkgena_divmux_enable,
	.disable = clkgena_divmux_disable,
	.is_enabled = clkgena_divmux_is_enabled,
	.get_parent = clkgena_divmux_get_parent,
	.set_parent = clkgena_divmux_set_parent,
	.round_rate = clkgena_divmux_round_rate,
	.recalc_rate = clkgena_divmux_recalc_rate,
	.set_rate = clkgena_divmux_set_rate,
};

/**
 * clk_register_genamux - register a genamux clock with the clock framework
 */
struct clk *clk_register_genamux(const char *name,
				 const char **parent_names, u8 num_parents,
				 void __iomem *reg,
				 const struct clkgena_divmux_data *muxdata,
				 u32 idx)
{
	/*
	 * Fixed constants across all ClockgenA variants
	 */
	const int mux_width = 2;
	const int divider_width = 5;
	struct clkgena_divmux *genamux;
	struct clk *clk;
	struct clk_init_data init;
	int i;

	genamux = kzalloc(sizeof(*genamux), GFP_KERNEL);
	if (!genamux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clkgena_divmux_ops;
	init.flags = CLK_IS_BASIC;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	genamux->mux.lock = &clkgena_divmux_lock;
	genamux->mux.mask = BIT(mux_width) - 1;
	genamux->mux.shift = muxdata->mux_start_bit + (idx * mux_width);
	if (genamux->mux.shift > 31) {
		/*
		 * We have spilled into the second mux register so
		 * adjust the register address and the bit shift accordingly
		 */
		genamux->mux.reg = reg + muxdata->mux_offset2;
		genamux->mux.shift -= 32;
	} else {
		genamux->mux.reg = reg + muxdata->mux_offset;
	}

	for (i = 0; i < NUM_INPUTS; i++) {
		/*
		 * Divider config for each input
		 */
		void __iomem *divbase = reg + muxdata->div_offsets[i];
		genamux->div[i].width = divider_width;
		genamux->div[i].reg = divbase + (idx * sizeof(u32));

		/*
		 * Mux enabled/running feedback register for each input.
		 */
		genamux->feedback_reg[i] = reg + muxdata->fb_offsets[i];
	}

	genamux->feedback_bit_idx = muxdata->fb_start_bit_idx + idx;
	genamux->hw.init = &init;

	clk = clk_register(NULL, &genamux->hw);
	if (IS_ERR(clk)) {
		kfree(genamux);
		goto err;
	}

	pr_debug("%s: parent %s rate %lu\n",
		 __clk_get_name(clk),
		 __clk_get_name(clk_get_parent(clk)),
		 clk_get_rate(clk));
err:
	return clk;
}

static struct clkgena_divmux_data st_divmux_c65hs = {
	.num_outputs = 4,
	.mux_offset = 0x14,
	.mux_start_bit = 0,
	.div_offsets = { 0x800, 0x900, 0xb00 },
	.fb_offsets = { 0x18, 0x1c, 0x20 },
	.fb_start_bit_idx = 0,
};

static struct clkgena_divmux_data st_divmux_c65ls = {
	.num_outputs = 14,
	.mux_offset = 0x14,
	.mux_offset2 = 0x24,
	.mux_start_bit = 8,
	.div_offsets = { 0x810, 0xa10, 0xb10 },
	.fb_offsets = { 0x18, 0x1c, 0x20 },
	.fb_start_bit_idx = 4,
};

static struct clkgena_divmux_data st_divmux_c32odf0 = {
	.num_outputs = 8,
	.mux_offset = 0x1c,
	.mux_start_bit = 0,
	.div_offsets = { 0x800, 0x900, 0xa60 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 0,
};

static struct clkgena_divmux_data st_divmux_c32odf1 = {
	.num_outputs = 8,
	.mux_offset = 0x1c,
	.mux_start_bit = 16,
	.div_offsets = { 0x820, 0x980, 0xa80 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 8,
};

static struct clkgena_divmux_data st_divmux_c32odf2 = {
	.num_outputs = 8,
	.mux_offset = 0x20,
	.mux_start_bit = 0,
	.div_offsets = { 0x840, 0xa20, 0xb10 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 16,
};

static struct clkgena_divmux_data st_divmux_c32odf3 = {
	.num_outputs = 8,
	.mux_offset = 0x20,
	.mux_start_bit = 16,
	.div_offsets = { 0x860, 0xa40, 0xb30 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 24,
};

static const struct of_device_id clkgena_divmux_of_match[] = {
	{
		.compatible = "st,clkgena-divmux-c65-hs",
		.data = &st_divmux_c65hs,
	},
	{
		.compatible = "st,clkgena-divmux-c65-ls",
		.data = &st_divmux_c65ls,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf0",
		.data = &st_divmux_c32odf0,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf1",
		.data = &st_divmux_c32odf1,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf2",
		.data = &st_divmux_c32odf2,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf3",
		.data = &st_divmux_c32odf3,
	},
	{}
};

static void __iomem * __init clkgen_get_register_base(
				struct device_node *np)
{
	struct device_node *pnode;
	void __iomem *reg = NULL;

	pnode = of_get_parent(np);
	if (!pnode)
		return NULL;

	reg = of_iomap(pnode, 0);

	of_node_put(pnode);
	return reg;
}
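
/*
 * Set-up callback for the "st,clkgena-divmux" nodes. The register base is
 * taken from the parent clockgen node, and one divmux clock is registered
 * per entry in "clock-output-names"; an empty name marks an unused output.
 */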
void __init st_of_clkgena_divmux_setup(struct device_node *np)
{
	const struct of_device_id *match;
	const struct clkgena_divmux_data *data;
	struct clk_onecell_data *clk_data;
	void __iomem *reg;
	const char **parents;
	int num_parents = 0, i;

	match = of_match_node(clkgena_divmux_of_match, np);
	if (WARN_ON(!match))
		return;

	data = (struct clkgena_divmux_data *)match->data;

	reg = clkgen_get_register_base(np);
	if (!reg)
		return;

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents))
		return;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err;

	clk_data->clk_num = data->num_outputs;
	clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks)
		goto err;

	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk *clk;
		const char *clk_name;

		if (of_property_read_string_index(np, "clock-output-names",
						  i, &clk_name))
			break;

		/*
		 * If we read an empty clock name then the output is unused
		 */
		if (*clk_name == '\0')
			continue;

		clk = clk_register_genamux(clk_name, parents, num_parents,
					   reg, data, i);

		if (IS_ERR(clk))
			goto err;

		clk_data->clks[i] = clk;
	}

	kfree(parents);

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
	return;
err:
	if (clk_data)
		kfree(clk_data->clks);

	kfree(clk_data);
	kfree(parents);
}
CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup);

struct clkgena_prediv_data {
	u32 offset;
	u8 shift;
	struct clk_div_table *table;
};

static struct clk_div_table prediv_table16[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 16 },
	{ .div = 0 },
};

static struct clkgena_prediv_data prediv_c65_data = {
	.offset = 0x4c,
	.shift = 31,
	.table = prediv_table16,
};

static struct clkgena_prediv_data prediv_c32_data = {
	.offset = 0x50,
	.shift = 1,
	.table = prediv_table16,
};

static const struct of_device_id clkgena_prediv_of_match[] = {
	{ .compatible = "st,clkgena-prediv-c65", .data = &prediv_c65_data },
	{ .compatible = "st,clkgena-prediv-c32", .data = &prediv_c32_data },
	{}
};

void __init st_of_clkgena_prediv_setup(struct device_node *np)
{
	const struct of_device_id *match;
	void __iomem *reg;
	const char *parent_name, *clk_name;
	struct clk *clk;
	struct clkgena_prediv_data *data;

	match = of_match_node(clkgena_prediv_of_match, np);
	if (!match) {
		pr_err("%s: No matching data\n", __func__);
		return;
	}

	data = (struct clkgena_prediv_data *)match->data;

	reg = clkgen_get_register_base(np);
	if (!reg)
		return;

	parent_name = of_clk_get_parent_name(np, 0);
	if (!parent_name)
		return;

	if (of_property_read_string_index(np, "clock-output-names",
					  0, &clk_name))
		return;

	clk = clk_register_divider_table(NULL, clk_name, parent_name, 0,
					 reg + data->offset, data->shift, 1,
					 0, data->table, NULL);
	if (IS_ERR(clk))
		return;

	of_clk_add_provider(np, of_clk_src_simple_get, clk);
	pr_debug("%s: parent %s rate %u\n",
		 __clk_get_name(clk),
		 __clk_get_name(clk_get_parent(clk)),
		 (unsigned int)clk_get_rate(clk));

	return;
}
CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup);
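
/*
 * Simple one-register muxes: each variant below is described only by a
 * register offset, a select bit-field and optional flags/spinlock. These
 * cover the ClockgenC/F VCC source selects and the A9 clock-source muxes.
 */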
struct clkgen_mux_data {
	u32 offset;
	u8 shift;
	u8 width;
	spinlock_t *lock;
	unsigned long clk_flags;
	u8 mux_flags;
};

static struct clkgen_mux_data clkgen_mux_c_vcc_hd_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_fvdp_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_hva_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_hd_416 = {
	.offset = 0,
	.shift = 16,
	.width = 1,
	.lock = &clkgenf_lock,
};

static struct clkgen_mux_data clkgen_mux_c_vcc_sd_416 = {
	.offset = 0,
	.shift = 17,
	.width = 1,
	.lock = &clkgenf_lock,
};

static struct clkgen_mux_data stih415_a9_mux_data = {
	.offset = 0,
	.shift = 1,
	.width = 2,
};
static struct clkgen_mux_data stih416_a9_mux_data = {
	.offset = 0,
	.shift = 0,
	.width = 2,
};
static struct clkgen_mux_data stih407_a9_mux_data = {
	.offset = 0x1a4,
	.shift = 1,
	.width = 2,
};

static const struct of_device_id mux_of_match[] = {
	{
		.compatible = "st,stih416-clkgenc-vcc-hd",
		.data = &clkgen_mux_c_vcc_hd_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-fvdp",
		.data = &clkgen_mux_f_vcc_fvdp_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-hva",
		.data = &clkgen_mux_f_vcc_hva_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-hd",
		.data = &clkgen_mux_f_vcc_hd_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-sd",
		.data = &clkgen_mux_c_vcc_sd_416,
	},
	{
		.compatible = "st,stih415-clkgen-a9-mux",
		.data = &stih415_a9_mux_data,
	},
	{
		.compatible = "st,stih416-clkgen-a9-mux",
		.data = &stih416_a9_mux_data,
	},
	{
		.compatible = "st,stih407-clkgen-a9-mux",
		.data = &stih407_a9_mux_data,
	},
	{}
};

void __init st_of_clkgen_mux_setup(struct device_node *np)
{
	const struct of_device_id *match;
	struct clk *clk;
	void __iomem *reg;
	const char **parents;
	int num_parents;
	struct clkgen_mux_data *data;

	match = of_match_node(mux_of_match, np);
	if (!match) {
		pr_err("%s: No matching data\n", __func__);
		return;
	}

	data = (struct clkgen_mux_data *)match->data;

	reg = of_iomap(np, 0);
	if (!reg) {
		pr_err("%s: Failed to get base address\n", __func__);
		return;
	}

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents)) {
		pr_err("%s: Failed to get parents (%ld)\n",
		       __func__, PTR_ERR(parents));
		return;
	}

	clk = clk_register_mux(NULL, np->name, parents, num_parents,
			       data->clk_flags | CLK_SET_RATE_PARENT,
			       reg + data->offset,
			       data->shift, data->width, data->mux_flags,
			       data->lock);
	if (IS_ERR(clk))
		goto err;

	pr_debug("%s: parent %s rate %u\n",
		 __clk_get_name(clk),
		 __clk_get_name(clk_get_parent(clk)),
		 (unsigned int)clk_get_rate(clk));

	of_clk_add_provider(np, of_clk_src_simple_get, clk);

err:
	kfree(parents);

	return;
}
CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup);
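
/*
 * VCC channels: each output is a composite clock made of a 1-bit gate, a
 * 2-bit source mux and a 2-bit power-of-two divider, with one bit-field per
 * channel packed into the three registers defined below.
 */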
#define VCC_MAX_CHANNELS 16

#define VCC_GATE_OFFSET 0x0
#define VCC_MUX_OFFSET 0x4
#define VCC_DIV_OFFSET 0x8

struct clkgen_vcc_data {
	spinlock_t *lock;
	unsigned long clk_flags;
};

static struct clkgen_vcc_data st_clkgenc_vcc_416 = {
	.clk_flags = CLK_SET_RATE_PARENT,
};

static struct clkgen_vcc_data st_clkgenf_vcc_416 = {
	.lock = &clkgenf_lock,
};

static const struct of_device_id vcc_of_match[] = {
	{ .compatible = "st,stih416-clkgenc", .data = &st_clkgenc_vcc_416 },
	{ .compatible = "st,stih416-clkgenf", .data = &st_clkgenf_vcc_416 },
	{}
};

void __init st_of_clkgen_vcc_setup(struct device_node *np)
{
	const struct of_device_id *match;
	void __iomem *reg;
	const char **parents;
	int num_parents, i;
	struct clk_onecell_data *clk_data;
	struct clkgen_vcc_data *data;

	match = of_match_node(vcc_of_match, np);
	if (WARN_ON(!match))
		return;
	data = (struct clkgen_vcc_data *)match->data;

	reg = of_iomap(np, 0);
	if (!reg)
		return;

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents))
		return;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err;

	clk_data->clk_num = VCC_MAX_CHANNELS;
	clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks)
		goto err;

	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk *clk;
		const char *clk_name;
		struct clk_gate *gate;
		struct clk_divider *div;
		struct clk_mux *mux;

		if (of_property_read_string_index(np, "clock-output-names",
						  i, &clk_name))
			break;

		/*
		 * If we read an empty clock name then the output is unused
		 */
		if (*clk_name == '\0')
			continue;

		gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
		if (!gate)
			break;

		div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
		if (!div) {
			kfree(gate);
			break;
		}

		mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
		if (!mux) {
			kfree(gate);
			kfree(div);
			break;
		}

		gate->reg = reg + VCC_GATE_OFFSET;
		gate->bit_idx = i;
		gate->flags = CLK_GATE_SET_TO_DISABLE;
		gate->lock = data->lock;

		div->reg = reg + VCC_DIV_OFFSET;
		div->shift = 2 * i;
		div->width = 2;
		div->flags = CLK_DIVIDER_POWER_OF_TWO |
			     CLK_DIVIDER_ROUND_CLOSEST;

		mux->reg = reg + VCC_MUX_OFFSET;
		mux->shift = 2 * i;
		mux->mask = 0x3;

		clk = clk_register_composite(NULL, clk_name, parents,
					     num_parents,
					     &mux->hw, &clk_mux_ops,
					     &div->hw, &clk_divider_ops,
					     &gate->hw, &clk_gate_ops,
					     data->clk_flags);
		if (IS_ERR(clk)) {
			kfree(gate);
			kfree(div);
			kfree(mux);
			goto err;
		}

		pr_debug("%s: parent %s rate %u\n",
			 __clk_get_name(clk),
			 __clk_get_name(clk_get_parent(clk)),
			 (unsigned int)clk_get_rate(clk));

		clk_data->clks[i] = clk;
	}

	kfree(parents);

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
	return;

err:
	/*
	 * clk_data may be NULL or only partially set up if an earlier
	 * allocation failed, so only unwind clocks we actually registered.
	 */
	if (clk_data && clk_data->clks) {
		for (i = 0; i < clk_data->clk_num; i++) {
			struct clk_composite *composite;

			if (!clk_data->clks[i])
				continue;

			composite = container_of(__clk_get_hw(clk_data->clks[i]),
						 struct clk_composite, hw);
			kfree(container_of(composite->gate_hw,
					   struct clk_gate, hw));
			kfree(container_of(composite->rate_hw,
					   struct clk_divider, hw));
			kfree(container_of(composite->mux_hw,
					   struct clk_mux, hw));
		}
	}

	if (clk_data)
		kfree(clk_data->clks);

	kfree(clk_data);
	kfree(parents);
}
CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup);