// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/of.h>

#include "clk-cgu.h"

#define GATE_HW_REG_STAT(reg)	((reg) + 0x0)
#define GATE_HW_REG_EN(reg)	((reg) + 0x4)
#define GATE_HW_REG_DIS(reg)	((reg) + 0x8)
#define MAX_DDIV_REG	8
#define MAX_DIVIDER_VAL	64

#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)

static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
					     const struct lgm_clk_branch *list)
{
	unsigned long flags;

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	/* for CLK_TYPE_FIXED entries the fixed rate is carried in mux_flags */
	return clk_hw_register_fixed_rate(NULL, list->name,
					  list->parent_data[0].name,
					  list->flags, list->mux_flags);
}

static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mux->lock, flags);
	if (mux->flags & MUX_CLK_SW)
		val = mux->reg;
	else
		val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
				      mux->width);
	spin_unlock_irqrestore(&mux->lock, flags);
	return clk_mux_val_to_index(hw, NULL, mux->flags, val);
}

static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 val;

	val = clk_mux_index_to_val(NULL, mux->flags, index);
	spin_lock_irqsave(&mux->lock, flags);
	if (mux->flags & MUX_CLK_SW)
		mux->reg = val;
	else
		lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
				mux->width, val);
	spin_unlock_irqrestore(&mux->lock, flags);

	return 0;
}

static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

static const struct clk_ops lgm_clk_mux_ops = {
	.get_parent = lgm_clk_mux_get_parent,
	.set_parent = lgm_clk_mux_set_parent,
	.determine_rate = lgm_clk_mux_determine_rate,
};

static struct clk_hw *
lgm_clk_register_mux(struct lgm_clk_provider *ctx,
		     const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->mux_flags;
	struct device *dev = ctx->dev;
	u8 shift = list->mux_shift;
	u8 width = list->mux_width;
	struct clk_init_data init = {};
	struct lgm_clk_mux *mux;
	u32 reg = list->mux_off;
	struct clk_hw *hw;
	int ret;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_mux_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = list->num_parents;

	mux->membase = ctx->membase;
	mux->lock = ctx->lock;
	mux->reg = reg;
	mux->shift = shift;
	mux->width = width;
	mux->flags = cflags;
	mux->hw.init = &init;

	hw = &mux->hw;
	ret = clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&mux->lock, flags);
		lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
		spin_unlock_irqrestore(&mux->lock, flags);
	}

	return hw;
}

static unsigned long
lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(&divider->lock, flags);
	val = lgm_get_clk_val(divider->membase, divider->reg,
			      divider->shift, divider->width);
	spin_unlock_irqrestore(&divider->lock, flags);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static long
lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int
lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned long flags;
	int value;

	value = divider_get_val(rate, prate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(&divider->lock, flags);
	lgm_set_clk_val(divider->membase, divider->reg,
			divider->shift, divider->width, value);
	spin_unlock_irqrestore(&divider->lock, flags);

	return 0;
}

static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
{
	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
			div->width_gate, enable);
	spin_unlock_irqrestore(&div->lock, flags);
	return 0;
}

static int lgm_clk_divider_enable(struct clk_hw *hw)
{
	return lgm_clk_divider_enable_disable(hw, 1);
}

static void lgm_clk_divider_disable(struct clk_hw *hw)
{
	lgm_clk_divider_enable_disable(hw, 0);
}

static const struct clk_ops lgm_clk_divider_ops = {
	.recalc_rate = lgm_clk_divider_recalc_rate,
	.round_rate = lgm_clk_divider_round_rate,
	.set_rate = lgm_clk_divider_set_rate,
	.enable = lgm_clk_divider_enable,
	.disable = lgm_clk_divider_disable,
};

static struct clk_hw *
lgm_clk_register_divider(struct lgm_clk_provider *ctx,
			 const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->div_flags;
	struct device *dev = ctx->dev;
	struct lgm_clk_divider *div;
	struct clk_init_data init = {};
	u8 shift = list->div_shift;
	u8 width = list->div_width;
	u8 shift_gate = list->div_shift_gate;
	u8 width_gate = list->div_width_gate;
	u32 reg = list->div_off;
	struct clk_hw *hw;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_divider_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = 1;

	div->membase = ctx->membase;
	div->lock = ctx->lock;
	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->shift_gate = shift_gate;
	div->width_gate = width_gate;
	div->flags = cflags;
	div->table = list->div_table;
	div->hw.init = &init;

	hw = &div->hw;
	ret = clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&div->lock, flags);
		lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
		spin_unlock_irqrestore(&div->lock, flags);
	}

	return hw;
}

static struct clk_hw *
lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list)
{
	unsigned long flags;
	struct clk_hw *hw;

	hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
					  list->parent_data[0].name,
					  list->flags, list->mult, list->div);
	if (IS_ERR(hw))
		return ERR_CAST(hw);

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return hw;
}

/*
 * Gates use three consecutive registers: offset +0 holds the status bits,
 * +4 is write-to-enable and +8 is write-to-disable, so both enable and
 * disable only ever write a '1' to their respective register.
 */
static int lgm_clk_gate_enable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_EN(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return 0;
}

static void lgm_clk_gate_disable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_DIS(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);
}

static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned int reg, ret;
	unsigned long flags;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_STAT(gate->reg);
	ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return ret;
}

static const struct clk_ops lgm_clk_gate_ops = {
	.enable = lgm_clk_gate_enable,
	.disable = lgm_clk_gate_disable,
	.is_enabled = lgm_clk_gate_is_enabled,
};

static struct clk_hw *
lgm_clk_register_gate(struct lgm_clk_provider *ctx,
		      const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->gate_flags;
	const char *pname = list->parent_data[0].name;
	struct device *dev = ctx->dev;
	u8 shift = list->gate_shift;
	struct clk_init_data init = {};
	struct lgm_clk_gate *gate;
	u32 reg = list->gate_off;
	struct clk_hw *hw;
	int ret;

	gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_gate_ops;
	init.flags = list->flags;
	init.parent_names = pname ? &pname : NULL;
	init.num_parents = pname ? 1 : 0;

	gate->membase = ctx->membase;
	gate->lock = ctx->lock;
	gate->reg = reg;
	gate->shift = shift;
	gate->flags = cflags;
	gate->hw.init = &init;

	hw = &gate->hw;
	ret = clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&gate->lock, flags);
		lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
		spin_unlock_irqrestore(&gate->lock, flags);
	}

	return hw;
}

int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list,
			      unsigned int nr_clk)
{
	struct clk_hw *hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		switch (list->type) {
		case CLK_TYPE_FIXED:
			hw = lgm_clk_register_fixed(ctx, list);
			break;
		case CLK_TYPE_MUX:
			hw = lgm_clk_register_mux(ctx, list);
			break;
		case CLK_TYPE_DIVIDER:
			hw = lgm_clk_register_divider(ctx, list);
			break;
		case CLK_TYPE_FIXED_FACTOR:
			hw = lgm_clk_register_fixed_factor(ctx, list);
			break;
		case CLK_TYPE_GATE:
			hw = lgm_clk_register_gate(ctx, list);
			break;
		default:
			dev_err(ctx->dev, "invalid clk type\n");
			return -EINVAL;
		}

		if (IS_ERR(hw)) {
			dev_err(ctx->dev,
				"register clk: %s, type: %u failed!\n",
				list->name, list->type);
			return -EIO;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}

/*
 * The dynamic divider (ddiv) splits the divider into two cascaded register
 * fields so that rate = parent_rate / (div0 * div1), with an optional extra
 * x2/5 (i.e. divide-by-2.5) stage selected by the exdiv bit.
 */
static unsigned long
lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned int div0, div1, exdiv;
	unsigned long flags;
	u64 prate;

	spin_lock_irqsave(&ddiv->lock, flags);
	div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift0, ddiv->width0) + 1;
	div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift1, ddiv->width1) + 1;
	exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
				ddiv->shift2, ddiv->width2);
	spin_unlock_irqrestore(&ddiv->lock, flags);

	prate = (u64)parent_rate;
	do_div(prate, div0);
	do_div(prate, div1);

	if (exdiv) {
		do_div(prate, ddiv->div);
		prate *= ddiv->mult;
	}

	return prate;
}

static int lgm_clk_ddiv_enable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 1);
	spin_unlock_irqrestore(&ddiv->lock, flags);
	return 0;
}

static void lgm_clk_ddiv_disable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 0);
	spin_unlock_irqrestore(&ddiv->lock, flags);
}

/*
 * Split an overall divider value into two register fields ddiv1 * ddiv2,
 * each limited to MAX_DDIV_REG (8); e.g. div = 48 becomes 8 * 6.
 */
static int
lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
{
	u32 idx, temp;

	*ddiv1 = 1;
	*ddiv2 = 1;

	if (div > MAX_DIVIDER_VAL)
		div = MAX_DIVIDER_VAL;

	if (div > 1) {
		for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
			temp = DIV_ROUND_UP_ULL((u64)div, idx);
			if (div % idx == 0 && temp <= MAX_DDIV_REG)
				break;
		}

		if (idx > MAX_DDIV_REG)
			return -EINVAL;

		*ddiv1 = temp;
		*ddiv2 = idx;
	}

	return 0;
}

static int
lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
		      unsigned long prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;

	div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);

	/* if predivide bit is enabled, modify div by factor of 2.5 */
	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
		div = div * 2;
	}

	if (div <= 0) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
			ddiv1 - 1);

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
			ddiv2 - 1);
	spin_unlock_irqrestore(&ddiv->lock, flags);

	return 0;
}

static long
lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;
	u64 rate64;

	div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);

	/* if predivide bit is enabled, modify div by factor of 2.5 */
	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = div * 2;
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
	}

	if (div <= 0) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return *prate;
	}

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) {
		if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0) {
			spin_unlock_irqrestore(&ddiv->lock, flags);
			return -EINVAL;
		}
	}

	rate64 = *prate;
	do_div(rate64, ddiv1);
	do_div(rate64, ddiv2);

	/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		rate64 = rate64 * 2;
		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
	}
	spin_unlock_irqrestore(&ddiv->lock, flags);

	return rate64;
}

static const struct clk_ops lgm_clk_ddiv_ops = {
	.recalc_rate = lgm_clk_ddiv_recalc_rate,
	.enable = lgm_clk_ddiv_enable,
	.disable = lgm_clk_ddiv_disable,
	.set_rate = lgm_clk_ddiv_set_rate,
	.round_rate = lgm_clk_ddiv_round_rate,
};

int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
			  const struct lgm_clk_ddiv_data *list,
			  unsigned int nr_clk)
{
	struct device *dev = ctx->dev;
	struct clk_init_data init = {};
	struct lgm_clk_ddiv *ddiv;
	struct clk_hw *hw;
	unsigned int idx;
	int ret;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
		if (!ddiv)
			return -ENOMEM;

		memset(&init, 0, sizeof(init));
		init.name = list->name;
		init.ops = &lgm_clk_ddiv_ops;
		init.flags = list->flags;
		init.parent_data = list->parent_data;
		init.num_parents = 1;

		ddiv->membase = ctx->membase;
		ddiv->lock = ctx->lock;
		ddiv->reg = list->reg;
		ddiv->shift0 = list->shift0;
		ddiv->width0 = list->width0;
		ddiv->shift1 = list->shift1;
		ddiv->width1 = list->width1;
		ddiv->shift_gate = list->shift_gate;
		ddiv->width_gate = list->width_gate;
		ddiv->shift2 = list->ex_shift;
		ddiv->width2 = list->ex_width;
		ddiv->flags = list->div_flags;
		ddiv->mult = 2;
		ddiv->div = 5;
		ddiv->hw.init = &init;

		hw = &ddiv->hw;
		ret = clk_hw_register(dev, hw);
		if (ret) {
			dev_err(dev, "register clk: %s failed!\n",
				list->name);
			return ret;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}
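
/*
 * Illustrative sketch, not part of the original file: how a platform clock
 * driver might describe one gate clock and hand it to
 * lgm_clk_register_branches(). The names example_*, EXAMPLE_CLK_GATE and the
 * register offsets below are made up for illustration; only struct fields
 * and helpers already referenced in this file are used, and real callers
 * typically build these tables with the helper macros from clk-cgu.h.
 */
#if 0	/* example only, not compiled */
#define EXAMPLE_CLK_GATE	0

static const struct clk_parent_data example_parent = {
	.name = "example_pll",
};

static const struct lgm_clk_branch example_branches[] = {
	{
		.id = EXAMPLE_CLK_GATE,
		.type = CLK_TYPE_GATE,
		.name = "example_gate",
		.parent_data = &example_parent,
		.num_parents = 1,
		.flags = 0,
		.gate_off = 0x200,	/* +0 status, +4 enable, +8 disable */
		.gate_shift = 5,
		.gate_flags = 0,
	},
};

static int example_register(struct lgm_clk_provider *ctx)
{
	/* ctx->membase, ctx->lock and ctx->clk_data must already be set up */
	return lgm_clk_register_branches(ctx, example_branches,
					 ARRAY_SIZE(example_branches));
}
#endif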