// SPDX-License-Identifier: GPL-2.0
/*
 * ti-sysc.c - Texas Instruments sysc interconnect target driver
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/iopoll.h>

#include <linux/platform_data/ti-sysc.h>

#include <dt-bindings/bus/ti-sysc.h>

#define DIS_ISP		BIT(2)
#define DIS_IVA		BIT(1)
#define DIS_SGX		BIT(0)

#define SOC_FLAG(match, flag)	{ .machine = match, .data = (void *)(flag), }

#define MAX_MODULE_SOFTRESET_WAIT	10000

enum sysc_soc {
	SOC_UNKNOWN,
	SOC_2420,
	SOC_2430,
	SOC_3430,
	SOC_3630,
	SOC_4430,
	SOC_4460,
	SOC_4470,
	SOC_5430,
	SOC_AM3,
	SOC_AM4,
	SOC_DRA7,
};

struct sysc_address {
	unsigned long base;
	struct list_head node;
};

struct sysc_soc_info {
	unsigned long general_purpose:1;
	enum sysc_soc soc;
	struct mutex list_lock;	/* disabled modules list lock */
	struct list_head disabled_modules;
};

enum sysc_clocks {
	SYSC_FCK,
	SYSC_ICK,
	SYSC_OPTFCK0,
	SYSC_OPTFCK1,
	SYSC_OPTFCK2,
	SYSC_OPTFCK3,
	SYSC_OPTFCK4,
	SYSC_OPTFCK5,
	SYSC_OPTFCK6,
	SYSC_OPTFCK7,
	SYSC_MAX_CLOCKS,
};

static struct sysc_soc_info *sysc_soc;
static const char * const reg_names[] = { "rev", "sysc", "syss", };
static const char * const clock_names[SYSC_MAX_CLOCKS] = {
	"fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
	"opt5", "opt6", "opt7",
};

#define SYSC_IDLEMODE_MASK		3
#define SYSC_CLOCKACTIVITY_MASK		3

/**
 * struct sysc - TI sysc interconnect target module registers and capabilities
 * @dev: struct device pointer
 * @module_pa: physical address of the interconnect target module
 * @module_size: size of the interconnect target module
 * @module_va: virtual address of the interconnect target module
 * @offsets: register offsets from module base
 * @mdata: ti-sysc to hwmod translation data for a module
 * @clocks: clocks used by the interconnect target module
 * @clock_roles: clock role names for the found clocks
 * @nr_clocks: number of clocks used by the interconnect target module
 * @rsts: resets used by the interconnect target module
 * @legacy_mode: configured for legacy mode if set
 * @cap: interconnect target module capabilities
 * @cfg: interconnect target module configuration
 * @cookie: data used by legacy platform callbacks
 * @name: name if available
 * @revision: interconnect target module revision
 * @enabled: sysc runtime enabled status
 * @needs_resume: runtime resume needed on resume from suspend
 * @child_needs_resume: runtime resume needed for child on resume from suspend
 * @disable_on_idle: status flag used for disabling modules with resets
 * @idle_work: work structure used to perform delayed idle on a module
 * @pre_reset_quirk: module specific pre-reset quirk
 * @post_reset_quirk: module specific post-reset quirk
 * @reset_done_quirk: module specific reset done quirk
 * @module_enable_quirk: module specific enable quirk
 * @module_disable_quirk: module specific disable quirk
 * @module_unlock_quirk: module specific sysconfig unlock quirk
 * @module_lock_quirk: module specific sysconfig lock quirk
 */
struct sysc {
	struct device *dev;
	u64 module_pa;
	u32 module_size;
	void __iomem *module_va;
	int offsets[SYSC_MAX_REGS];
	struct ti_sysc_module_data *mdata;
	struct clk **clocks;
	const char **clock_roles;
	int nr_clocks;
	struct reset_control *rsts;
	const char *legacy_mode;
	const struct sysc_capabilities *cap;
	struct sysc_config cfg;
	struct ti_sysc_cookie cookie;
	const char *name;
	u32 revision;
	unsigned int enabled:1;
	unsigned int needs_resume:1;
	unsigned int child_needs_resume:1;
	struct delayed_work idle_work;
	void (*pre_reset_quirk)(struct sysc *sysc);
	void (*post_reset_quirk)(struct sysc *sysc);
	void (*reset_done_quirk)(struct sysc *sysc);
	void (*module_enable_quirk)(struct sysc *sysc);
	void (*module_disable_quirk)(struct sysc *sysc);
	void (*module_unlock_quirk)(struct sysc *sysc);
	void (*module_lock_quirk)(struct sysc *sysc);
};

static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
				  bool is_child);

static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
	if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
		writew_relaxed(value & 0xffff, ddata->module_va + offset);

		/* Only i2c revision has LO and HI register with stride of 4 */
		if (ddata->offsets[SYSC_REVISION] >= 0 &&
		    offset == ddata->offsets[SYSC_REVISION]) {
			u16 hi = value >> 16;

			writew_relaxed(hi, ddata->module_va + offset + 4);
		}

		return;
	}

	writel_relaxed(value, ddata->module_va + offset);
}

static u32 sysc_read(struct sysc *ddata, int offset)
{
	if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
		u32 val;

		val = readw_relaxed(ddata->module_va + offset);

		/* Only i2c revision has LO and HI register with stride of 4 */
		if (ddata->offsets[SYSC_REVISION] >= 0 &&
		    offset == ddata->offsets[SYSC_REVISION]) {
			u16 tmp = readw_relaxed(ddata->module_va + offset + 4);

			val |= tmp << 16;
		}

		return val;
	}

	return readl_relaxed(ddata->module_va + offset);
}

static bool sysc_opt_clks_needed(struct sysc *ddata)
{
	return !!(ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_NEEDED);
}

static u32 sysc_read_revision(struct sysc *ddata)
{
	int offset = ddata->offsets[SYSC_REVISION];

	if (offset < 0)
		return 0;

	return sysc_read(ddata, offset);
}

static u32 sysc_read_sysconfig(struct sysc *ddata)
{
	int offset = ddata->offsets[SYSC_SYSCONFIG];

	if (offset < 0)
		return 0;

	return sysc_read(ddata, offset);
}

static u32 sysc_read_sysstatus(struct sysc *ddata)
{
	int offset = ddata->offsets[SYSC_SYSSTATUS];

	if (offset < 0)
		return 0;

	return sysc_read(ddata, offset);
}

/* Poll on reset status */
static int sysc_wait_softreset(struct sysc *ddata)
{
	u32 sysc_mask, syss_done, rstval;
	int syss_offset, error = 0;

	if (ddata->cap->regbits->srst_shift < 0)
		return 0;

	syss_offset = ddata->offsets[SYSC_SYSSTATUS];
	sysc_mask = BIT(ddata->cap->regbits->srst_shift);

	if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
		syss_done = 0;
	else
		syss_done = ddata->cfg.syss_mask;

	if (syss_offset >= 0) {
		error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
				rstval, (rstval & ddata->cfg.syss_mask) ==
				syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);

	} else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
		error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
				rstval, !(rstval & sysc_mask),
				100, MAX_MODULE_SOFTRESET_WAIT);
	}

	return error;
}

static int sysc_add_named_clock_from_child(struct sysc *ddata,
					   const char *name,
					   const char *optfck_name)
{
	struct device_node *np = ddata->dev->of_node;
	struct device_node *child;
	struct clk_lookup *cl;
	struct clk *clock;
	const char *n;

	if (name)
		n = name;
	else
		n = optfck_name;

	/* Does the clock alias already exist? */
	clock = of_clk_get_by_name(np, n);
	if (!IS_ERR(clock)) {
		clk_put(clock);

		return 0;
	}

	child = of_get_next_available_child(np, NULL);
	if (!child)
		return -ENODEV;

	clock = devm_get_clk_from_child(ddata->dev, child, name);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	/*
	 * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
	 * limit for clk_get(). If cl ever needs to be freed, it should be done
	 * with clkdev_drop().
	 */
	cl = kcalloc(1, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->con_id = n;
	cl->dev_id = dev_name(ddata->dev);
	cl->clk = clock;
	clkdev_add(cl);

	clk_put(clock);

	return 0;
}

static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
{
	const char *optfck_name;
	int error, index;

	if (ddata->nr_clocks < SYSC_OPTFCK0)
		index = SYSC_OPTFCK0;
	else
		index = ddata->nr_clocks;

	if (name)
		optfck_name = name;
	else
		optfck_name = clock_names[index];

	error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
	if (error)
		return error;

	ddata->clock_roles[index] = optfck_name;
	ddata->nr_clocks++;

	return 0;
}

static int sysc_get_one_clock(struct sysc *ddata, const char *name)
{
	int error, i, index = -ENODEV;

	if (!strncmp(clock_names[SYSC_FCK], name, 3))
		index = SYSC_FCK;
	else if (!strncmp(clock_names[SYSC_ICK], name, 3))
		index = SYSC_ICK;

	if (index < 0) {
		for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
			if (!ddata->clocks[i]) {
				index = i;
				break;
			}
		}
	}

	if (index < 0) {
		dev_err(ddata->dev, "clock %s not added\n", name);
		return index;
	}

	ddata->clocks[index] = devm_clk_get(ddata->dev, name);
	if (IS_ERR(ddata->clocks[index])) {
		dev_err(ddata->dev, "clock get error for %s: %li\n",
			name, PTR_ERR(ddata->clocks[index]));

		return PTR_ERR(ddata->clocks[index]);
	}

	error = clk_prepare(ddata->clocks[index]);
	if (error) {
		dev_err(ddata->dev, "clock prepare error for %s: %i\n",
			name, error);

		return error;
	}

	return 0;
}

static int sysc_get_clocks(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	struct property *prop;
	const char *name;
	int nr_fck = 0, nr_ick = 0, i, error = 0;

	ddata->clock_roles = devm_kcalloc(ddata->dev,
					  SYSC_MAX_CLOCKS,
					  sizeof(*ddata->clock_roles),
					  GFP_KERNEL);
	if (!ddata->clock_roles)
		return -ENOMEM;

	of_property_for_each_string(np, "clock-names", prop, name) {
		if (!strncmp(clock_names[SYSC_FCK], name, 3))
			nr_fck++;
		if (!strncmp(clock_names[SYSC_ICK], name, 3))
			nr_ick++;
		ddata->clock_roles[ddata->nr_clocks] = name;
ddata->nr_clocks++; 393 } 394 395 if (ddata->nr_clocks < 1) 396 return 0; 397 398 if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) { 399 error = sysc_init_ext_opt_clock(ddata, NULL); 400 if (error) 401 return error; 402 } 403 404 if (ddata->nr_clocks > SYSC_MAX_CLOCKS) { 405 dev_err(ddata->dev, "too many clocks for %pOF\n", np); 406 407 return -EINVAL; 408 } 409 410 if (nr_fck > 1 || nr_ick > 1) { 411 dev_err(ddata->dev, "max one fck and ick for %pOF\n", np); 412 413 return -EINVAL; 414 } 415 416 /* Always add a slot for main clocks fck and ick even if unused */ 417 if (!nr_fck) 418 ddata->nr_clocks++; 419 if (!nr_ick) 420 ddata->nr_clocks++; 421 422 ddata->clocks = devm_kcalloc(ddata->dev, 423 ddata->nr_clocks, sizeof(*ddata->clocks), 424 GFP_KERNEL); 425 if (!ddata->clocks) 426 return -ENOMEM; 427 428 for (i = 0; i < SYSC_MAX_CLOCKS; i++) { 429 const char *name = ddata->clock_roles[i]; 430 431 if (!name) 432 continue; 433 434 error = sysc_get_one_clock(ddata, name); 435 if (error) 436 return error; 437 } 438 439 return 0; 440 } 441 442 static int sysc_enable_main_clocks(struct sysc *ddata) 443 { 444 struct clk *clock; 445 int i, error; 446 447 if (!ddata->clocks) 448 return 0; 449 450 for (i = 0; i < SYSC_OPTFCK0; i++) { 451 clock = ddata->clocks[i]; 452 453 /* Main clocks may not have ick */ 454 if (IS_ERR_OR_NULL(clock)) 455 continue; 456 457 error = clk_enable(clock); 458 if (error) 459 goto err_disable; 460 } 461 462 return 0; 463 464 err_disable: 465 for (i--; i >= 0; i--) { 466 clock = ddata->clocks[i]; 467 468 /* Main clocks may not have ick */ 469 if (IS_ERR_OR_NULL(clock)) 470 continue; 471 472 clk_disable(clock); 473 } 474 475 return error; 476 } 477 478 static void sysc_disable_main_clocks(struct sysc *ddata) 479 { 480 struct clk *clock; 481 int i; 482 483 if (!ddata->clocks) 484 return; 485 486 for (i = 0; i < SYSC_OPTFCK0; i++) { 487 clock = ddata->clocks[i]; 488 if (IS_ERR_OR_NULL(clock)) 489 continue; 490 491 clk_disable(clock); 492 } 493 } 494 495 static int sysc_enable_opt_clocks(struct sysc *ddata) 496 { 497 struct clk *clock; 498 int i, error; 499 500 if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) 501 return 0; 502 503 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { 504 clock = ddata->clocks[i]; 505 506 /* Assume no holes for opt clocks */ 507 if (IS_ERR_OR_NULL(clock)) 508 return 0; 509 510 error = clk_enable(clock); 511 if (error) 512 goto err_disable; 513 } 514 515 return 0; 516 517 err_disable: 518 for (i--; i >= 0; i--) { 519 clock = ddata->clocks[i]; 520 if (IS_ERR_OR_NULL(clock)) 521 continue; 522 523 clk_disable(clock); 524 } 525 526 return error; 527 } 528 529 static void sysc_disable_opt_clocks(struct sysc *ddata) 530 { 531 struct clk *clock; 532 int i; 533 534 if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) 535 return; 536 537 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { 538 clock = ddata->clocks[i]; 539 540 /* Assume no holes for opt clocks */ 541 if (IS_ERR_OR_NULL(clock)) 542 return; 543 544 clk_disable(clock); 545 } 546 } 547 548 static void sysc_clkdm_deny_idle(struct sysc *ddata) 549 { 550 struct ti_sysc_platform_data *pdata; 551 552 if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO)) 553 return; 554 555 pdata = dev_get_platdata(ddata->dev); 556 if (pdata && pdata->clkdm_deny_idle) 557 pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie); 558 } 559 560 static void sysc_clkdm_allow_idle(struct sysc *ddata) 561 { 562 struct ti_sysc_platform_data *pdata; 563 564 if (ddata->legacy_mode || 
(ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO)) 565 return; 566 567 pdata = dev_get_platdata(ddata->dev); 568 if (pdata && pdata->clkdm_allow_idle) 569 pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie); 570 } 571 572 /** 573 * sysc_init_resets - init rstctrl reset line if configured 574 * @ddata: device driver data 575 * 576 * See sysc_rstctrl_reset_deassert(). 577 */ 578 static int sysc_init_resets(struct sysc *ddata) 579 { 580 ddata->rsts = 581 devm_reset_control_get_optional_shared(ddata->dev, "rstctrl"); 582 583 return PTR_ERR_OR_ZERO(ddata->rsts); 584 } 585 586 /** 587 * sysc_parse_and_check_child_range - parses module IO region from ranges 588 * @ddata: device driver data 589 * 590 * In general we only need rev, syss, and sysc registers and not the whole 591 * module range. But we do want the offsets for these registers from the 592 * module base. This allows us to check them against the legacy hwmod 593 * platform data. Let's also check the ranges are configured properly. 594 */ 595 static int sysc_parse_and_check_child_range(struct sysc *ddata) 596 { 597 struct device_node *np = ddata->dev->of_node; 598 const __be32 *ranges; 599 u32 nr_addr, nr_size; 600 int len, error; 601 602 ranges = of_get_property(np, "ranges", &len); 603 if (!ranges) { 604 dev_err(ddata->dev, "missing ranges for %pOF\n", np); 605 606 return -ENOENT; 607 } 608 609 len /= sizeof(*ranges); 610 611 if (len < 3) { 612 dev_err(ddata->dev, "incomplete ranges for %pOF\n", np); 613 614 return -EINVAL; 615 } 616 617 error = of_property_read_u32(np, "#address-cells", &nr_addr); 618 if (error) 619 return -ENOENT; 620 621 error = of_property_read_u32(np, "#size-cells", &nr_size); 622 if (error) 623 return -ENOENT; 624 625 if (nr_addr != 1 || nr_size != 1) { 626 dev_err(ddata->dev, "invalid ranges for %pOF\n", np); 627 628 return -EINVAL; 629 } 630 631 ranges++; 632 ddata->module_pa = of_translate_address(np, ranges++); 633 ddata->module_size = be32_to_cpup(ranges); 634 635 return 0; 636 } 637 638 static struct device_node *stdout_path; 639 640 static void sysc_init_stdout_path(struct sysc *ddata) 641 { 642 struct device_node *np = NULL; 643 const char *uart; 644 645 if (IS_ERR(stdout_path)) 646 return; 647 648 if (stdout_path) 649 return; 650 651 np = of_find_node_by_path("/chosen"); 652 if (!np) 653 goto err; 654 655 uart = of_get_property(np, "stdout-path", NULL); 656 if (!uart) 657 goto err; 658 659 np = of_find_node_by_path(uart); 660 if (!np) 661 goto err; 662 663 stdout_path = np; 664 665 return; 666 667 err: 668 stdout_path = ERR_PTR(-ENODEV); 669 } 670 671 static void sysc_check_quirk_stdout(struct sysc *ddata, 672 struct device_node *np) 673 { 674 sysc_init_stdout_path(ddata); 675 if (np != stdout_path) 676 return; 677 678 ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT | 679 SYSC_QUIRK_NO_RESET_ON_INIT; 680 } 681 682 /** 683 * sysc_check_one_child - check child configuration 684 * @ddata: device driver data 685 * @np: child device node 686 * 687 * Let's avoid messy situations where we have new interconnect target 688 * node but children have "ti,hwmods". These belong to the interconnect 689 * target node and are managed by this driver. 
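 *
 * For example, a "ti,hwmods" property placed in a child node rather than in
 * the interconnect target node itself would bypass this driver's handling,
 * which is why sysc_check_one_child() below warns about it.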
 */
static void sysc_check_one_child(struct sysc *ddata,
				 struct device_node *np)
{
	const char *name;

	name = of_get_property(np, "ti,hwmods", NULL);
	if (name && !of_device_is_compatible(np, "ti,sysc"))
		dev_warn(ddata->dev, "really a child ti,hwmods property?");

	sysc_check_quirk_stdout(ddata, np);
	sysc_parse_dts_quirks(ddata, np, true);
}

static void sysc_check_children(struct sysc *ddata)
{
	struct device_node *child;

	for_each_child_of_node(ddata->dev->of_node, child)
		sysc_check_one_child(ddata, child);
}

/*
 * So far only I2C uses 16-bit read access with clockactivity with revision
 * in two registers with stride of 4. We can detect this based on the rev
 * register size to configure things far enough to be able to properly read
 * the revision register.
 */
static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res)
{
	if (resource_size(res) == 8)
		ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT;
}

/**
 * sysc_parse_one - parses the interconnect target module registers
 * @ddata: device driver data
 * @reg: register to parse
 */
static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
{
	struct resource *res;
	const char *name;

	switch (reg) {
	case SYSC_REVISION:
	case SYSC_SYSCONFIG:
	case SYSC_SYSSTATUS:
		name = reg_names[reg];
		break;
	default:
		return -EINVAL;
	}

	res = platform_get_resource_byname(to_platform_device(ddata->dev),
					   IORESOURCE_MEM, name);
	if (!res) {
		ddata->offsets[reg] = -ENODEV;

		return 0;
	}

	ddata->offsets[reg] = res->start - ddata->module_pa;
	if (reg == SYSC_REVISION)
		sysc_check_quirk_16bit(ddata, res);

	return 0;
}

static int sysc_parse_registers(struct sysc *ddata)
{
	int i, error;

	for (i = 0; i < SYSC_MAX_REGS; i++) {
		error = sysc_parse_one(ddata, i);
		if (error)
			return error;
	}

	return 0;
}

/**
 * sysc_check_registers - check for misconfigured register overlaps
 * @ddata: device driver data
 */
static int sysc_check_registers(struct sysc *ddata)
{
	int i, j, nr_regs = 0, nr_matches = 0;

	for (i = 0; i < SYSC_MAX_REGS; i++) {
		if (ddata->offsets[i] < 0)
			continue;

		if (ddata->offsets[i] > (ddata->module_size - 4)) {
			dev_err(ddata->dev, "register outside module range");

			return -EINVAL;
		}

		for (j = 0; j < SYSC_MAX_REGS; j++) {
			if (ddata->offsets[j] < 0)
				continue;

			if (ddata->offsets[i] == ddata->offsets[j])
				nr_matches++;
		}
		nr_regs++;
	}

	if (nr_matches > nr_regs) {
		dev_err(ddata->dev, "overlapping registers: (%i/%i)",
			nr_regs, nr_matches);

		return -EINVAL;
	}

	return 0;
}

/**
 * sysc_ioremap - ioremap register space for the interconnect target module
 * @ddata: device driver data
 *
 * Note that the interconnect target module registers can be anywhere
 * within the interconnect target module range. For example, SGX has
 * them at offset 0x1fc00 in the 32MB module address space. And cpsw
 * has them at offset 0x1200 in the CPSW_WR child. Usually the
 * interconnect target module registers are at the beginning of
 * the module range though.
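 *
 * To cover both cases, the mapping below runs from the module base up to
 * the highest register offset found, at least SZ_1K, and is capped to the
 * module size.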
820 */ 821 static int sysc_ioremap(struct sysc *ddata) 822 { 823 int size; 824 825 if (ddata->offsets[SYSC_REVISION] < 0 && 826 ddata->offsets[SYSC_SYSCONFIG] < 0 && 827 ddata->offsets[SYSC_SYSSTATUS] < 0) { 828 size = ddata->module_size; 829 } else { 830 size = max3(ddata->offsets[SYSC_REVISION], 831 ddata->offsets[SYSC_SYSCONFIG], 832 ddata->offsets[SYSC_SYSSTATUS]); 833 834 if (size < SZ_1K) 835 size = SZ_1K; 836 837 if ((size + sizeof(u32)) > ddata->module_size) 838 size = ddata->module_size; 839 } 840 841 ddata->module_va = devm_ioremap(ddata->dev, 842 ddata->module_pa, 843 size + sizeof(u32)); 844 if (!ddata->module_va) 845 return -EIO; 846 847 return 0; 848 } 849 850 /** 851 * sysc_map_and_check_registers - ioremap and check device registers 852 * @ddata: device driver data 853 */ 854 static int sysc_map_and_check_registers(struct sysc *ddata) 855 { 856 int error; 857 858 error = sysc_parse_and_check_child_range(ddata); 859 if (error) 860 return error; 861 862 sysc_check_children(ddata); 863 864 error = sysc_parse_registers(ddata); 865 if (error) 866 return error; 867 868 error = sysc_ioremap(ddata); 869 if (error) 870 return error; 871 872 error = sysc_check_registers(ddata); 873 if (error) 874 return error; 875 876 return 0; 877 } 878 879 /** 880 * sysc_show_rev - read and show interconnect target module revision 881 * @bufp: buffer to print the information to 882 * @ddata: device driver data 883 */ 884 static int sysc_show_rev(char *bufp, struct sysc *ddata) 885 { 886 int len; 887 888 if (ddata->offsets[SYSC_REVISION] < 0) 889 return sprintf(bufp, ":NA"); 890 891 len = sprintf(bufp, ":%08x", ddata->revision); 892 893 return len; 894 } 895 896 static int sysc_show_reg(struct sysc *ddata, 897 char *bufp, enum sysc_registers reg) 898 { 899 if (ddata->offsets[reg] < 0) 900 return sprintf(bufp, ":NA"); 901 902 return sprintf(bufp, ":%x", ddata->offsets[reg]); 903 } 904 905 static int sysc_show_name(char *bufp, struct sysc *ddata) 906 { 907 if (!ddata->name) 908 return 0; 909 910 return sprintf(bufp, ":%s", ddata->name); 911 } 912 913 /** 914 * sysc_show_registers - show information about interconnect target module 915 * @ddata: device driver data 916 */ 917 static void sysc_show_registers(struct sysc *ddata) 918 { 919 char buf[128]; 920 char *bufp = buf; 921 int i; 922 923 for (i = 0; i < SYSC_MAX_REGS; i++) 924 bufp += sysc_show_reg(ddata, bufp, i); 925 926 bufp += sysc_show_rev(bufp, ddata); 927 bufp += sysc_show_name(bufp, ddata); 928 929 dev_dbg(ddata->dev, "%llx:%x%s\n", 930 ddata->module_pa, ddata->module_size, 931 buf); 932 } 933 934 /** 935 * sysc_write_sysconfig - handle sysconfig quirks for register write 936 * @ddata: device driver data 937 * @value: register value 938 */ 939 static void sysc_write_sysconfig(struct sysc *ddata, u32 value) 940 { 941 if (ddata->module_unlock_quirk) 942 ddata->module_unlock_quirk(ddata); 943 944 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value); 945 946 if (ddata->module_lock_quirk) 947 ddata->module_lock_quirk(ddata); 948 } 949 950 #define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1) 951 #define SYSC_CLOCACT_ICK 2 952 953 /* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */ 954 static int sysc_enable_module(struct device *dev) 955 { 956 struct sysc *ddata; 957 const struct sysc_regbits *regbits; 958 u32 reg, idlemodes, best_mode; 959 int error; 960 961 ddata = dev_get_drvdata(dev); 962 963 /* 964 * Some modules like DSS reset automatically on idle. 
Enable optional 965 * reset clocks and wait for OCP softreset to complete. 966 */ 967 if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) { 968 error = sysc_enable_opt_clocks(ddata); 969 if (error) { 970 dev_err(ddata->dev, 971 "Optional clocks failed for enable: %i\n", 972 error); 973 return error; 974 } 975 } 976 /* 977 * Some modules like i2c and hdq1w have unusable reset status unless 978 * the module reset quirk is enabled. Skip status check on enable. 979 */ 980 if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) { 981 error = sysc_wait_softreset(ddata); 982 if (error) 983 dev_warn(ddata->dev, "OCP softreset timed out\n"); 984 } 985 if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) 986 sysc_disable_opt_clocks(ddata); 987 988 /* 989 * Some subsystem private interconnects, like DSS top level module, 990 * need only the automatic OCP softreset handling with no sysconfig 991 * register bits to configure. 992 */ 993 if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV) 994 return 0; 995 996 regbits = ddata->cap->regbits; 997 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); 998 999 /* 1000 * Set CLOCKACTIVITY, we only use it for ick. And we only configure it 1001 * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware 1002 * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag. 1003 */ 1004 if (regbits->clkact_shift >= 0 && 1005 (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT)) 1006 reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift; 1007 1008 /* Set SIDLE mode */ 1009 idlemodes = ddata->cfg.sidlemodes; 1010 if (!idlemodes || regbits->sidle_shift < 0) 1011 goto set_midle; 1012 1013 if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE | 1014 SYSC_QUIRK_SWSUP_SIDLE_ACT)) { 1015 best_mode = SYSC_IDLE_NO; 1016 } else { 1017 best_mode = fls(ddata->cfg.sidlemodes) - 1; 1018 if (best_mode > SYSC_IDLE_MASK) { 1019 dev_err(dev, "%s: invalid sidlemode\n", __func__); 1020 return -EINVAL; 1021 } 1022 1023 /* Set WAKEUP */ 1024 if (regbits->enwkup_shift >= 0 && 1025 ddata->cfg.sysc_val & BIT(regbits->enwkup_shift)) 1026 reg |= BIT(regbits->enwkup_shift); 1027 } 1028 1029 reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); 1030 reg |= best_mode << regbits->sidle_shift; 1031 sysc_write_sysconfig(ddata, reg); 1032 1033 set_midle: 1034 /* Set MIDLE mode */ 1035 idlemodes = ddata->cfg.midlemodes; 1036 if (!idlemodes || regbits->midle_shift < 0) 1037 goto set_autoidle; 1038 1039 best_mode = fls(ddata->cfg.midlemodes) - 1; 1040 if (best_mode > SYSC_IDLE_MASK) { 1041 dev_err(dev, "%s: invalid midlemode\n", __func__); 1042 return -EINVAL; 1043 } 1044 1045 if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY) 1046 best_mode = SYSC_IDLE_NO; 1047 1048 reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); 1049 reg |= best_mode << regbits->midle_shift; 1050 sysc_write_sysconfig(ddata, reg); 1051 1052 set_autoidle: 1053 /* Autoidle bit must enabled separately if available */ 1054 if (regbits->autoidle_shift >= 0 && 1055 ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) { 1056 reg |= 1 << regbits->autoidle_shift; 1057 sysc_write_sysconfig(ddata, reg); 1058 } 1059 1060 /* Flush posted write */ 1061 sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); 1062 1063 if (ddata->module_enable_quirk) 1064 ddata->module_enable_quirk(ddata); 1065 1066 return 0; 1067 } 1068 1069 static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode) 1070 { 1071 if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP)) 1072 *best_mode = SYSC_IDLE_SMART_WKUP; 1073 else if (idlemodes & BIT(SYSC_IDLE_SMART)) 1074 *best_mode = SYSC_IDLE_SMART; 
1075 else if (idlemodes & BIT(SYSC_IDLE_FORCE)) 1076 *best_mode = SYSC_IDLE_FORCE; 1077 else 1078 return -EINVAL; 1079 1080 return 0; 1081 } 1082 1083 /* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */ 1084 static int sysc_disable_module(struct device *dev) 1085 { 1086 struct sysc *ddata; 1087 const struct sysc_regbits *regbits; 1088 u32 reg, idlemodes, best_mode; 1089 int ret; 1090 1091 ddata = dev_get_drvdata(dev); 1092 if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV) 1093 return 0; 1094 1095 if (ddata->module_disable_quirk) 1096 ddata->module_disable_quirk(ddata); 1097 1098 regbits = ddata->cap->regbits; 1099 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); 1100 1101 /* Set MIDLE mode */ 1102 idlemodes = ddata->cfg.midlemodes; 1103 if (!idlemodes || regbits->midle_shift < 0) 1104 goto set_sidle; 1105 1106 ret = sysc_best_idle_mode(idlemodes, &best_mode); 1107 if (ret) { 1108 dev_err(dev, "%s: invalid midlemode\n", __func__); 1109 return ret; 1110 } 1111 1112 if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) || 1113 ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY)) 1114 best_mode = SYSC_IDLE_FORCE; 1115 1116 reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); 1117 reg |= best_mode << regbits->midle_shift; 1118 sysc_write_sysconfig(ddata, reg); 1119 1120 set_sidle: 1121 /* Set SIDLE mode */ 1122 idlemodes = ddata->cfg.sidlemodes; 1123 if (!idlemodes || regbits->sidle_shift < 0) 1124 return 0; 1125 1126 if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) { 1127 best_mode = SYSC_IDLE_FORCE; 1128 } else { 1129 ret = sysc_best_idle_mode(idlemodes, &best_mode); 1130 if (ret) { 1131 dev_err(dev, "%s: invalid sidlemode\n", __func__); 1132 return ret; 1133 } 1134 } 1135 1136 reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); 1137 reg |= best_mode << regbits->sidle_shift; 1138 if (regbits->autoidle_shift >= 0 && 1139 ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) 1140 reg |= 1 << regbits->autoidle_shift; 1141 sysc_write_sysconfig(ddata, reg); 1142 1143 /* Flush posted write */ 1144 sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); 1145 1146 return 0; 1147 } 1148 1149 static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev, 1150 struct sysc *ddata) 1151 { 1152 struct ti_sysc_platform_data *pdata; 1153 int error; 1154 1155 pdata = dev_get_platdata(ddata->dev); 1156 if (!pdata) 1157 return 0; 1158 1159 if (!pdata->idle_module) 1160 return -ENODEV; 1161 1162 error = pdata->idle_module(dev, &ddata->cookie); 1163 if (error) 1164 dev_err(dev, "%s: could not idle: %i\n", 1165 __func__, error); 1166 1167 reset_control_assert(ddata->rsts); 1168 1169 return 0; 1170 } 1171 1172 static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev, 1173 struct sysc *ddata) 1174 { 1175 struct ti_sysc_platform_data *pdata; 1176 int error; 1177 1178 pdata = dev_get_platdata(ddata->dev); 1179 if (!pdata) 1180 return 0; 1181 1182 if (!pdata->enable_module) 1183 return -ENODEV; 1184 1185 error = pdata->enable_module(dev, &ddata->cookie); 1186 if (error) 1187 dev_err(dev, "%s: could not enable: %i\n", 1188 __func__, error); 1189 1190 reset_control_deassert(ddata->rsts); 1191 1192 return 0; 1193 } 1194 1195 static int __maybe_unused sysc_runtime_suspend(struct device *dev) 1196 { 1197 struct sysc *ddata; 1198 int error = 0; 1199 1200 ddata = dev_get_drvdata(dev); 1201 1202 if (!ddata->enabled) 1203 return 0; 1204 1205 sysc_clkdm_deny_idle(ddata); 1206 1207 if (ddata->legacy_mode) { 1208 error = sysc_runtime_suspend_legacy(dev, ddata); 1209 if (error) 1210 goto 
err_allow_idle; 1211 } else { 1212 error = sysc_disable_module(dev); 1213 if (error) 1214 goto err_allow_idle; 1215 } 1216 1217 sysc_disable_main_clocks(ddata); 1218 1219 if (sysc_opt_clks_needed(ddata)) 1220 sysc_disable_opt_clocks(ddata); 1221 1222 ddata->enabled = false; 1223 1224 err_allow_idle: 1225 reset_control_assert(ddata->rsts); 1226 1227 sysc_clkdm_allow_idle(ddata); 1228 1229 return error; 1230 } 1231 1232 static int __maybe_unused sysc_runtime_resume(struct device *dev) 1233 { 1234 struct sysc *ddata; 1235 int error = 0; 1236 1237 ddata = dev_get_drvdata(dev); 1238 1239 if (ddata->enabled) 1240 return 0; 1241 1242 1243 sysc_clkdm_deny_idle(ddata); 1244 1245 if (sysc_opt_clks_needed(ddata)) { 1246 error = sysc_enable_opt_clocks(ddata); 1247 if (error) 1248 goto err_allow_idle; 1249 } 1250 1251 error = sysc_enable_main_clocks(ddata); 1252 if (error) 1253 goto err_opt_clocks; 1254 1255 reset_control_deassert(ddata->rsts); 1256 1257 if (ddata->legacy_mode) { 1258 error = sysc_runtime_resume_legacy(dev, ddata); 1259 if (error) 1260 goto err_main_clocks; 1261 } else { 1262 error = sysc_enable_module(dev); 1263 if (error) 1264 goto err_main_clocks; 1265 } 1266 1267 ddata->enabled = true; 1268 1269 sysc_clkdm_allow_idle(ddata); 1270 1271 return 0; 1272 1273 err_main_clocks: 1274 sysc_disable_main_clocks(ddata); 1275 err_opt_clocks: 1276 if (sysc_opt_clks_needed(ddata)) 1277 sysc_disable_opt_clocks(ddata); 1278 err_allow_idle: 1279 sysc_clkdm_allow_idle(ddata); 1280 1281 return error; 1282 } 1283 1284 static int __maybe_unused sysc_noirq_suspend(struct device *dev) 1285 { 1286 struct sysc *ddata; 1287 1288 ddata = dev_get_drvdata(dev); 1289 1290 if (ddata->cfg.quirks & 1291 (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) 1292 return 0; 1293 1294 return pm_runtime_force_suspend(dev); 1295 } 1296 1297 static int __maybe_unused sysc_noirq_resume(struct device *dev) 1298 { 1299 struct sysc *ddata; 1300 1301 ddata = dev_get_drvdata(dev); 1302 1303 if (ddata->cfg.quirks & 1304 (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) 1305 return 0; 1306 1307 return pm_runtime_force_resume(dev); 1308 } 1309 1310 static const struct dev_pm_ops sysc_pm_ops = { 1311 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_noirq_suspend, sysc_noirq_resume) 1312 SET_RUNTIME_PM_OPS(sysc_runtime_suspend, 1313 sysc_runtime_resume, 1314 NULL) 1315 }; 1316 1317 /* Module revision register based quirks */ 1318 struct sysc_revision_quirk { 1319 const char *name; 1320 u32 base; 1321 int rev_offset; 1322 int sysc_offset; 1323 int syss_offset; 1324 u32 revision; 1325 u32 revision_mask; 1326 u32 quirks; 1327 }; 1328 1329 #define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss, \ 1330 optrev_val, optrevmask, optquirkmask) \ 1331 { \ 1332 .name = (optname), \ 1333 .base = (optbase), \ 1334 .rev_offset = (optrev), \ 1335 .sysc_offset = (optsysc), \ 1336 .syss_offset = (optsyss), \ 1337 .revision = (optrev_val), \ 1338 .revision_mask = (optrevmask), \ 1339 .quirks = (optquirkmask), \ 1340 } 1341 1342 static const struct sysc_revision_quirk sysc_revision_quirks[] = { 1343 /* These drivers need to be fixed to not use pm_runtime_irq_safe() */ 1344 SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff, 1345 SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET), 1346 SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 1347 SYSC_QUIRK_LEGACY_IDLE), 1348 SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 1349 SYSC_QUIRK_LEGACY_IDLE), 1350 SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 
0x00000000, 0xffffffff, 1351 SYSC_QUIRK_LEGACY_IDLE), 1352 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, 1353 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), 1354 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, 1355 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), 1356 /* Uarts on omap4 and later */ 1357 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff, 1358 SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), 1359 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff, 1360 SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), 1361 1362 /* Quirks that need to be set based on the module address */ 1363 SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, 1364 SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT | 1365 SYSC_QUIRK_SWSUP_SIDLE), 1366 1367 /* Quirks that need to be set based on detected module */ 1368 SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff, 1369 SYSC_MODULE_QUIRK_AESS), 1370 SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 1371 SYSC_QUIRK_CLKDM_NOAUTO), 1372 SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 1373 SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), 1374 SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff, 1375 SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), 1376 SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff, 1377 SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), 1378 SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 1379 SYSC_QUIRK_CLKDM_NOAUTO), 1380 SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 1381 SYSC_QUIRK_CLKDM_NOAUTO), 1382 SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff, 1383 SYSC_QUIRK_OPT_CLKS_NEEDED), 1384 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff, 1385 SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE), 1386 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff, 1387 SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE), 1388 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff, 1389 SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE), 1390 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff, 1391 SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE), 1392 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff, 1393 SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE), 1394 SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0, 1395 SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE), 1396 SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0), 1397 SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 1398 SYSC_MODULE_QUIRK_SGX), 1399 SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff, 1400 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1401 SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0, 1402 SYSC_MODULE_QUIRK_RTC_UNLOCK), 1403 SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff, 1404 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1405 SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff, 1406 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1407 SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 1408 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1409 SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 1410 
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1411 SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 1412 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1413 SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff, 1414 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1415 SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, 1416 SYSC_MODULE_QUIRK_WDT), 1417 /* PRUSS on am3, am4 and am5 */ 1418 SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000, 1419 SYSC_MODULE_QUIRK_PRUSS), 1420 /* Watchdog on am3 and am4 */ 1421 SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, 1422 SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE), 1423 1424 #ifdef DEBUG 1425 SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0), 1426 SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0), 1427 SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0), 1428 SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), 1429 SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, 1430 0xffff00f0, 0), 1431 SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0), 1432 SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0), 1433 SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), 1434 SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), 1435 SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0), 1436 SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0), 1437 SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), 1438 SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), 1439 SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), 1440 SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), 1441 SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0), 1442 SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1443 SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1444 SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0), 1445 SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0), 1446 SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0), 1447 SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0), 1448 SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0), 1449 SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0), 1450 SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0), 1451 SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0), 1452 SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0), 1453 SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0), 1454 SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0), 1455 SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0), 1456 SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0), 1457 SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0), 1458 SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0), 1459 SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0), 1460 SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0), 1461 SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0), 1462 SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0), 1463 
SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0), 1464 SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1465 SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1466 SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), 1467 SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0), 1468 SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0), 1469 SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0), 1470 SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0), 1471 SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0), 1472 SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0), 1473 SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0), 1474 SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0), 1475 SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0), 1476 SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0), 1477 SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0), 1478 SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0), 1479 SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0), 1480 /* Some timers on omap4 and later */ 1481 SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0), 1482 SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0), 1483 SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0), 1484 SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0), 1485 SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0), 1486 SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0), 1487 SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0), 1488 SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), 1489 SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0), 1490 SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0), 1491 #endif 1492 }; 1493 1494 /* 1495 * Early quirks based on module base and register offsets only that are 1496 * needed before the module revision can be read 1497 */ 1498 static void sysc_init_early_quirks(struct sysc *ddata) 1499 { 1500 const struct sysc_revision_quirk *q; 1501 int i; 1502 1503 for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) { 1504 q = &sysc_revision_quirks[i]; 1505 1506 if (!q->base) 1507 continue; 1508 1509 if (q->base != ddata->module_pa) 1510 continue; 1511 1512 if (q->rev_offset != ddata->offsets[SYSC_REVISION]) 1513 continue; 1514 1515 if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) 1516 continue; 1517 1518 if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) 1519 continue; 1520 1521 ddata->name = q->name; 1522 ddata->cfg.quirks |= q->quirks; 1523 } 1524 } 1525 1526 /* Quirks that also consider the revision register value */ 1527 static void sysc_init_revision_quirks(struct sysc *ddata) 1528 { 1529 const struct sysc_revision_quirk *q; 1530 int i; 1531 1532 for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) { 1533 q = &sysc_revision_quirks[i]; 1534 1535 if (q->base && q->base != ddata->module_pa) 1536 continue; 1537 1538 if (q->rev_offset != ddata->offsets[SYSC_REVISION]) 1539 continue; 1540 1541 if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) 1542 continue; 1543 1544 if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) 1545 continue; 1546 1547 if (q->revision == ddata->revision || 1548 (q->revision & 
q->revision_mask) == 1549 (ddata->revision & q->revision_mask)) { 1550 ddata->name = q->name; 1551 ddata->cfg.quirks |= q->quirks; 1552 } 1553 } 1554 } 1555 1556 /* 1557 * DSS needs dispc outputs disabled to reset modules. Returns mask of 1558 * enabled DSS interrupts. Eventually we may be able to do this on 1559 * dispc init rather than top-level DSS init. 1560 */ 1561 static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, 1562 bool disable) 1563 { 1564 bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false; 1565 const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1); 1566 int manager_count; 1567 bool framedonetv_irq = true; 1568 u32 val, irq_mask = 0; 1569 1570 switch (sysc_soc->soc) { 1571 case SOC_2420 ... SOC_3630: 1572 manager_count = 2; 1573 framedonetv_irq = false; 1574 break; 1575 case SOC_4430 ... SOC_4470: 1576 manager_count = 3; 1577 break; 1578 case SOC_5430: 1579 case SOC_DRA7: 1580 manager_count = 4; 1581 break; 1582 case SOC_AM4: 1583 manager_count = 1; 1584 framedonetv_irq = false; 1585 break; 1586 case SOC_UNKNOWN: 1587 default: 1588 return 0; 1589 }; 1590 1591 /* Remap the whole module range to be able to reset dispc outputs */ 1592 devm_iounmap(ddata->dev, ddata->module_va); 1593 ddata->module_va = devm_ioremap(ddata->dev, 1594 ddata->module_pa, 1595 ddata->module_size); 1596 if (!ddata->module_va) 1597 return -EIO; 1598 1599 /* DISP_CONTROL */ 1600 val = sysc_read(ddata, dispc_offset + 0x40); 1601 lcd_en = val & lcd_en_mask; 1602 digit_en = val & digit_en_mask; 1603 if (lcd_en) 1604 irq_mask |= BIT(0); /* FRAMEDONE */ 1605 if (digit_en) { 1606 if (framedonetv_irq) 1607 irq_mask |= BIT(24); /* FRAMEDONETV */ 1608 else 1609 irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */ 1610 } 1611 if (disable & (lcd_en | digit_en)) 1612 sysc_write(ddata, dispc_offset + 0x40, 1613 val & ~(lcd_en_mask | digit_en_mask)); 1614 1615 if (manager_count <= 2) 1616 return irq_mask; 1617 1618 /* DISPC_CONTROL2 */ 1619 val = sysc_read(ddata, dispc_offset + 0x238); 1620 lcd2_en = val & lcd_en_mask; 1621 if (lcd2_en) 1622 irq_mask |= BIT(22); /* FRAMEDONE2 */ 1623 if (disable && lcd2_en) 1624 sysc_write(ddata, dispc_offset + 0x238, 1625 val & ~lcd_en_mask); 1626 1627 if (manager_count <= 3) 1628 return irq_mask; 1629 1630 /* DISPC_CONTROL3 */ 1631 val = sysc_read(ddata, dispc_offset + 0x848); 1632 lcd3_en = val & lcd_en_mask; 1633 if (lcd3_en) 1634 irq_mask |= BIT(30); /* FRAMEDONE3 */ 1635 if (disable && lcd3_en) 1636 sysc_write(ddata, dispc_offset + 0x848, 1637 val & ~lcd_en_mask); 1638 1639 return irq_mask; 1640 } 1641 1642 /* DSS needs child outputs disabled and SDI registers cleared for reset */ 1643 static void sysc_pre_reset_quirk_dss(struct sysc *ddata) 1644 { 1645 const int dispc_offset = 0x1000; 1646 int error; 1647 u32 irq_mask, val; 1648 1649 /* Get enabled outputs */ 1650 irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false); 1651 if (!irq_mask) 1652 return; 1653 1654 /* Clear IRQSTATUS */ 1655 sysc_write(ddata, dispc_offset + 0x18, irq_mask); 1656 1657 /* Disable outputs */ 1658 val = sysc_quirk_dispc(ddata, dispc_offset, true); 1659 1660 /* Poll IRQSTATUS */ 1661 error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18, 1662 val, val != irq_mask, 100, 50); 1663 if (error) 1664 dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n", 1665 __func__, val, irq_mask); 1666 1667 if (sysc_soc->soc == SOC_3430) { 1668 /* Clear DSS_SDI_CONTROL */ 1669 sysc_write(ddata, 0x44, 0); 1670 1671 /* Clear DSS_PLL_CONTROL */ 1672 sysc_write(ddata, 0x48, 0); 1673 } 1674 1675 /* Clear 
DSS_CONTROL to switch DSS clock sources to PRCM if not */ 1676 sysc_write(ddata, 0x40, 0); 1677 } 1678 1679 /* 1-wire needs module's internal clocks enabled for reset */ 1680 static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata) 1681 { 1682 int offset = 0x0c; /* HDQ_CTRL_STATUS */ 1683 u16 val; 1684 1685 val = sysc_read(ddata, offset); 1686 val |= BIT(5); 1687 sysc_write(ddata, offset, val); 1688 } 1689 1690 /* AESS (Audio Engine SubSystem) needs autogating set after enable */ 1691 static void sysc_module_enable_quirk_aess(struct sysc *ddata) 1692 { 1693 int offset = 0x7c; /* AESS_AUTO_GATING_ENABLE */ 1694 1695 sysc_write(ddata, offset, 1); 1696 } 1697 1698 /* I2C needs to be disabled for reset */ 1699 static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable) 1700 { 1701 int offset; 1702 u16 val; 1703 1704 /* I2C_CON, omap2/3 is different from omap4 and later */ 1705 if ((ddata->revision & 0xffffff00) == 0x001f0000) 1706 offset = 0x24; 1707 else 1708 offset = 0xa4; 1709 1710 /* I2C_EN */ 1711 val = sysc_read(ddata, offset); 1712 if (enable) 1713 val |= BIT(15); 1714 else 1715 val &= ~BIT(15); 1716 sysc_write(ddata, offset, val); 1717 } 1718 1719 static void sysc_pre_reset_quirk_i2c(struct sysc *ddata) 1720 { 1721 sysc_clk_quirk_i2c(ddata, false); 1722 } 1723 1724 static void sysc_post_reset_quirk_i2c(struct sysc *ddata) 1725 { 1726 sysc_clk_quirk_i2c(ddata, true); 1727 } 1728 1729 /* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */ 1730 static void sysc_quirk_rtc(struct sysc *ddata, bool lock) 1731 { 1732 u32 val, kick0_val = 0, kick1_val = 0; 1733 unsigned long flags; 1734 int error; 1735 1736 if (!lock) { 1737 kick0_val = 0x83e70b13; 1738 kick1_val = 0x95a4f1e0; 1739 } 1740 1741 local_irq_save(flags); 1742 /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */ 1743 error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val, 1744 !(val & BIT(0)), 100, 50); 1745 if (error) 1746 dev_warn(ddata->dev, "rtc busy timeout\n"); 1747 /* Now we have ~15 microseconds to read/write various registers */ 1748 sysc_write(ddata, 0x6c, kick0_val); 1749 sysc_write(ddata, 0x70, kick1_val); 1750 local_irq_restore(flags); 1751 } 1752 1753 static void sysc_module_unlock_quirk_rtc(struct sysc *ddata) 1754 { 1755 sysc_quirk_rtc(ddata, false); 1756 } 1757 1758 static void sysc_module_lock_quirk_rtc(struct sysc *ddata) 1759 { 1760 sysc_quirk_rtc(ddata, true); 1761 } 1762 1763 /* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */ 1764 static void sysc_module_enable_quirk_sgx(struct sysc *ddata) 1765 { 1766 int offset = 0xff08; /* OCP_DEBUG_CONFIG */ 1767 u32 val = BIT(31); /* THALIA_INT_BYPASS */ 1768 1769 sysc_write(ddata, offset, val); 1770 } 1771 1772 /* Watchdog timer needs a disable sequence after reset */ 1773 static void sysc_reset_done_quirk_wdt(struct sysc *ddata) 1774 { 1775 int wps, spr, error; 1776 u32 val; 1777 1778 wps = 0x34; 1779 spr = 0x48; 1780 1781 sysc_write(ddata, spr, 0xaaaa); 1782 error = readl_poll_timeout(ddata->module_va + wps, val, 1783 !(val & 0x10), 100, 1784 MAX_MODULE_SOFTRESET_WAIT); 1785 if (error) 1786 dev_warn(ddata->dev, "wdt disable step1 failed\n"); 1787 1788 sysc_write(ddata, spr, 0x5555); 1789 error = readl_poll_timeout(ddata->module_va + wps, val, 1790 !(val & 0x10), 100, 1791 MAX_MODULE_SOFTRESET_WAIT); 1792 if (error) 1793 dev_warn(ddata->dev, "wdt disable step2 failed\n"); 1794 } 1795 1796 /* PRUSS needs to set MSTANDBY_INIT inorder to idle properly */ 1797 static void sysc_module_disable_quirk_pruss(struct 
sysc *ddata) 1798 { 1799 u32 reg; 1800 1801 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); 1802 reg |= SYSC_PRUSS_STANDBY_INIT; 1803 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); 1804 } 1805 1806 static void sysc_init_module_quirks(struct sysc *ddata) 1807 { 1808 if (ddata->legacy_mode || !ddata->name) 1809 return; 1810 1811 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) { 1812 ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w; 1813 1814 return; 1815 } 1816 1817 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) { 1818 ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c; 1819 ddata->post_reset_quirk = sysc_post_reset_quirk_i2c; 1820 1821 return; 1822 } 1823 1824 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS) 1825 ddata->module_enable_quirk = sysc_module_enable_quirk_aess; 1826 1827 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET) 1828 ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss; 1829 1830 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) { 1831 ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc; 1832 ddata->module_lock_quirk = sysc_module_lock_quirk_rtc; 1833 1834 return; 1835 } 1836 1837 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX) 1838 ddata->module_enable_quirk = sysc_module_enable_quirk_sgx; 1839 1840 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) { 1841 ddata->reset_done_quirk = sysc_reset_done_quirk_wdt; 1842 ddata->module_disable_quirk = sysc_reset_done_quirk_wdt; 1843 } 1844 1845 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) 1846 ddata->module_disable_quirk = sysc_module_disable_quirk_pruss; 1847 } 1848 1849 static int sysc_clockdomain_init(struct sysc *ddata) 1850 { 1851 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev); 1852 struct clk *fck = NULL, *ick = NULL; 1853 int error; 1854 1855 if (!pdata || !pdata->init_clockdomain) 1856 return 0; 1857 1858 switch (ddata->nr_clocks) { 1859 case 2: 1860 ick = ddata->clocks[SYSC_ICK]; 1861 fallthrough; 1862 case 1: 1863 fck = ddata->clocks[SYSC_FCK]; 1864 break; 1865 case 0: 1866 return 0; 1867 } 1868 1869 error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie); 1870 if (!error || error == -ENODEV) 1871 return 0; 1872 1873 return error; 1874 } 1875 1876 /* 1877 * Note that pdata->init_module() typically does a reset first. After 1878 * pdata->init_module() is done, PM runtime can be used for the interconnect 1879 * target module. 1880 */ 1881 static int sysc_legacy_init(struct sysc *ddata) 1882 { 1883 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev); 1884 int error; 1885 1886 if (!pdata || !pdata->init_module) 1887 return 0; 1888 1889 error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie); 1890 if (error == -EEXIST) 1891 error = 0; 1892 1893 return error; 1894 } 1895 1896 /* 1897 * Note that the caller must ensure the interconnect target module is enabled 1898 * before calling reset. Otherwise reset will not complete. 
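 *
 * The sequence below is: run the optional pre-reset quirk, set the
 * SOFTRESET bit in SYSCONFIG if available, optionally delay for
 * cfg.srst_udelay, run the optional post-reset quirk, poll for reset
 * completion with sysc_wait_softreset(), and finally run the optional
 * reset done quirk.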
1899 */ 1900 static int sysc_reset(struct sysc *ddata) 1901 { 1902 int sysc_offset, sysc_val, error; 1903 u32 sysc_mask; 1904 1905 sysc_offset = ddata->offsets[SYSC_SYSCONFIG]; 1906 1907 if (ddata->legacy_mode || 1908 ddata->cap->regbits->srst_shift < 0 || 1909 ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) 1910 return 0; 1911 1912 sysc_mask = BIT(ddata->cap->regbits->srst_shift); 1913 1914 if (ddata->pre_reset_quirk) 1915 ddata->pre_reset_quirk(ddata); 1916 1917 if (sysc_offset >= 0) { 1918 sysc_val = sysc_read_sysconfig(ddata); 1919 sysc_val |= sysc_mask; 1920 sysc_write(ddata, sysc_offset, sysc_val); 1921 } 1922 1923 if (ddata->cfg.srst_udelay) 1924 usleep_range(ddata->cfg.srst_udelay, 1925 ddata->cfg.srst_udelay * 2); 1926 1927 if (ddata->post_reset_quirk) 1928 ddata->post_reset_quirk(ddata); 1929 1930 error = sysc_wait_softreset(ddata); 1931 if (error) 1932 dev_warn(ddata->dev, "OCP softreset timed out\n"); 1933 1934 if (ddata->reset_done_quirk) 1935 ddata->reset_done_quirk(ddata); 1936 1937 return error; 1938 } 1939 1940 /* 1941 * At this point the module is configured enough to read the revision but 1942 * module may not be completely configured yet to use PM runtime. Enable 1943 * all clocks directly during init to configure the quirks needed for PM 1944 * runtime based on the revision register. 1945 */ 1946 static int sysc_init_module(struct sysc *ddata) 1947 { 1948 int error = 0; 1949 1950 error = sysc_clockdomain_init(ddata); 1951 if (error) 1952 return error; 1953 1954 sysc_clkdm_deny_idle(ddata); 1955 1956 /* 1957 * Always enable clocks. The bootloader may or may not have enabled 1958 * the related clocks. 1959 */ 1960 error = sysc_enable_opt_clocks(ddata); 1961 if (error) 1962 return error; 1963 1964 error = sysc_enable_main_clocks(ddata); 1965 if (error) 1966 goto err_opt_clocks; 1967 1968 if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) { 1969 error = reset_control_deassert(ddata->rsts); 1970 if (error) 1971 goto err_main_clocks; 1972 } 1973 1974 ddata->revision = sysc_read_revision(ddata); 1975 sysc_init_revision_quirks(ddata); 1976 sysc_init_module_quirks(ddata); 1977 1978 if (ddata->legacy_mode) { 1979 error = sysc_legacy_init(ddata); 1980 if (error) 1981 goto err_reset; 1982 } 1983 1984 if (!ddata->legacy_mode) { 1985 error = sysc_enable_module(ddata->dev); 1986 if (error) 1987 goto err_reset; 1988 } 1989 1990 error = sysc_reset(ddata); 1991 if (error) 1992 dev_err(ddata->dev, "Reset failed with %d\n", error); 1993 1994 if (error && !ddata->legacy_mode) 1995 sysc_disable_module(ddata->dev); 1996 1997 err_reset: 1998 if (error && !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) 1999 reset_control_assert(ddata->rsts); 2000 2001 err_main_clocks: 2002 if (error) 2003 sysc_disable_main_clocks(ddata); 2004 err_opt_clocks: 2005 /* No re-enable of clockdomain autoidle to prevent module autoidle */ 2006 if (error) { 2007 sysc_disable_opt_clocks(ddata); 2008 sysc_clkdm_allow_idle(ddata); 2009 } 2010 2011 return error; 2012 } 2013 2014 static int sysc_init_sysc_mask(struct sysc *ddata) 2015 { 2016 struct device_node *np = ddata->dev->of_node; 2017 int error; 2018 u32 val; 2019 2020 error = of_property_read_u32(np, "ti,sysc-mask", &val); 2021 if (error) 2022 return 0; 2023 2024 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask; 2025 2026 return 0; 2027 } 2028 2029 static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes, 2030 const char *name) 2031 { 2032 struct device_node *np = ddata->dev->of_node; 2033 struct property *prop; 2034 const __be32 *p; 2035 u32 val; 
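	/*
	 * Collect the listed idle modes into a bitmask. The property is a
	 * list of SYSC_IDLE_* values from dt-bindings/bus/ti-sysc.h, for
	 * example (illustrative devicetree snippet only):
	 *
	 *	ti,sysc-sidle = <SYSC_IDLE_FORCE>,
	 *			<SYSC_IDLE_NO>,
	 *			<SYSC_IDLE_SMART>;
	 */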
static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
			      const char *name)
{
	struct device_node *np = ddata->dev->of_node;
	struct property *prop;
	const __be32 *p;
	u32 val;

	of_property_for_each_u32(np, name, prop, p, val) {
		if (val >= SYSC_NR_IDLEMODES) {
			dev_err(ddata->dev, "invalid idlemode: %i\n", val);
			return -EINVAL;
		}
		*idlemodes |= (1 << val);
	}

	return 0;
}

static int sysc_init_idlemodes(struct sysc *ddata)
{
	int error;

	error = sysc_init_idlemode(ddata, &ddata->cfg.midlemodes,
				   "ti,sysc-midle");
	if (error)
		return error;

	error = sysc_init_idlemode(ddata, &ddata->cfg.sidlemodes,
				   "ti,sysc-sidle");
	if (error)
		return error;

	return 0;
}

/*
 * Only some devices on omap4 and later have SYSCONFIG reset done
 * bit. We can detect this if there is no SYSSTATUS at all, or the
 * SYSSTATUS bit 0 is not used. Note that some SYSSTATUS registers
 * have multiple bits for the child devices like OHCI and EHCI.
 * Depends on SYSC being parsed first.
 */
static int sysc_init_syss_mask(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	int error;
	u32 val;

	error = of_property_read_u32(np, "ti,syss-mask", &val);
	if (error) {
		if ((ddata->cap->type == TI_SYSC_OMAP4 ||
		     ddata->cap->type == TI_SYSC_OMAP4_TIMER) &&
		    (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
			ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;

		return 0;
	}

	if (!(val & 1) && (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
		ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;

	ddata->cfg.syss_mask = val;

	return 0;
}

/*
 * Many child device drivers need to have fck and opt clocks available
 * to get the clock rate for device internal configuration etc.
 */
static int sysc_child_add_named_clock(struct sysc *ddata,
				      struct device *child,
				      const char *name)
{
	struct clk *clk;
	struct clk_lookup *l;
	int error = 0;

	if (!name)
		return 0;

	clk = clk_get(child, name);
	if (!IS_ERR(clk)) {
		error = -EEXIST;
		goto put_clk;
	}

	clk = clk_get(ddata->dev, name);
	if (IS_ERR(clk))
		return -ENODEV;

	l = clkdev_create(clk, name, dev_name(child));
	if (!l)
		error = -ENOMEM;
put_clk:
	clk_put(clk);

	return error;
}

static int sysc_child_add_clocks(struct sysc *ddata,
				 struct device *child)
{
	int i, error;

	for (i = 0; i < ddata->nr_clocks; i++) {
		error = sysc_child_add_named_clock(ddata,
						   child,
						   ddata->clock_roles[i]);
		if (error && error != -EEXIST) {
			dev_err(ddata->dev, "could not add child clock %s: %i\n",
				ddata->clock_roles[i], error);

			return error;
		}
	}

	return 0;
}
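/*
 * Illustrative only: with the clkdev alias created above, a child device
 * driver can look up the module clocks by role name from its own probe,
 * assuming the parent module has an "fck" clock, something like:
 *
 *	struct clk *fck = clk_get(dev, "fck");
 *	unsigned long rate;
 *
 *	if (!IS_ERR(fck)) {
 *		rate = clk_get_rate(fck);
 *		clk_put(fck);
 *	}
 */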
static struct device_type sysc_device_type = {
};

static struct sysc *sysc_child_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent || parent->type != &sysc_device_type)
		return NULL;

	return dev_get_drvdata(parent);
}

static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	error = pm_generic_runtime_suspend(dev);
	if (error)
		return error;

	if (!ddata->enabled)
		return 0;

	return sysc_runtime_suspend(ddata->dev);
}

static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	if (!ddata->enabled) {
		error = sysc_runtime_resume(ddata->dev);
		if (error < 0)
			dev_err(ddata->dev,
				"%s error: %i\n", __func__, error);
	}

	return pm_generic_runtime_resume(dev);
}

#ifdef CONFIG_PM_SLEEP
static int sysc_child_suspend_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	error = pm_generic_suspend_noirq(dev);
	if (error) {
		dev_err(dev, "%s error at %i: %i\n",
			__func__, __LINE__, error);

		return error;
	}

	if (!pm_runtime_status_suspended(dev)) {
		error = pm_generic_runtime_suspend(dev);
		if (error) {
			dev_dbg(dev, "%s busy at %i: %i\n",
				__func__, __LINE__, error);

			return 0;
		}

		error = sysc_runtime_suspend(ddata->dev);
		if (error) {
			dev_err(dev, "%s error at %i: %i\n",
				__func__, __LINE__, error);

			return error;
		}

		ddata->child_needs_resume = true;
	}

	return 0;
}

static int sysc_child_resume_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	if (ddata->child_needs_resume) {
		ddata->child_needs_resume = false;

		error = sysc_runtime_resume(ddata->dev);
		if (error)
			dev_err(ddata->dev,
				"%s runtime resume error: %i\n",
				__func__, error);

		error = pm_generic_runtime_resume(dev);
		if (error)
			dev_err(ddata->dev,
				"%s generic runtime resume: %i\n",
				__func__, error);
	}

	return pm_generic_resume_noirq(dev);
}
#endif

static struct dev_pm_domain sysc_child_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
				   sysc_child_runtime_resume,
				   NULL)
		USE_PLATFORM_PM_SLEEP_OPS
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
					      sysc_child_resume_noirq)
	}
};
/**
 * sysc_legacy_idle_quirk - handle children in omap_device compatible way
 * @ddata: device driver data
 * @child: child device driver
 *
 * Allow idle for child devices as done with _od_runtime_suspend().
 * Otherwise many child devices will not idle because of the permanent
 * parent usecount set in pm_runtime_irq_safe().
 *
 * Note that the long term solution is to just modify the child device
 * drivers to not set pm_runtime_irq_safe() and then this can be just
 * dropped.
 */
static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
{
	if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
		dev_pm_domain_set(child, &sysc_child_pm_domain);
}

static int sysc_notifier_call(struct notifier_block *nb,
			      unsigned long event, void *device)
{
	struct device *dev = device;
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);
	if (!ddata)
		return NOTIFY_DONE;

	switch (event) {
	case BUS_NOTIFY_ADD_DEVICE:
		error = sysc_child_add_clocks(ddata, dev);
		if (error)
			return error;
		sysc_legacy_idle_quirk(ddata, dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block sysc_nb = {
	.notifier_call = sysc_notifier_call,
};

/* Device tree configured quirks */
struct sysc_dts_quirk {
	const char *name;
	u32 mask;
};

static const struct sysc_dts_quirk sysc_dts_quirks[] = {
	{ .name = "ti,no-idle-on-init",
	  .mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
	{ .name = "ti,no-reset-on-init",
	  .mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
	{ .name = "ti,no-idle",
	  .mask = SYSC_QUIRK_NO_IDLE, },
};

static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
				  bool is_child)
{
	const struct property *prop;
	int i, len;

	for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
		const char *name = sysc_dts_quirks[i].name;

		prop = of_get_property(np, name, &len);
		if (!prop)
			continue;

		ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
		if (is_child) {
			dev_warn(ddata->dev,
				 "dts flag should be at module level for %s\n",
				 name);
		}
	}
}

static int sysc_init_dts_quirks(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	int error;
	u32 val;

	ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);

	sysc_parse_dts_quirks(ddata, np, false);
	error = of_property_read_u32(np, "ti,sysc-delay-us", &val);
	if (!error) {
		if (val > 255) {
			dev_warn(ddata->dev, "bad ti,sysc-delay-us: %i\n",
				 val);
		}

		ddata->cfg.srst_udelay = (u8)val;
	}

	return 0;
}
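/*
 * Illustrative only: the dts quirks parsed above are boolean properties
 * on the interconnect target module node, for example a hypothetical
 * node along the lines of:
 *
 *	target-module@48076000 {
 *		compatible = "ti,sysc-omap4", "ti,sysc";
 *		ti,hwmods = "slimbus2";
 *		ti,no-reset-on-init;
 *		ti,sysc-delay-us = <2>;
 *	};
 */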
static void sysc_unprepare(struct sysc *ddata)
{
	int i;

	if (!ddata->clocks)
		return;

	for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
		if (!IS_ERR_OR_NULL(ddata->clocks[i]))
			clk_unprepare(ddata->clocks[i]);
	}
}

/*
 * Common sysc register bits found on omap2, also known as type1
 */
static const struct sysc_regbits sysc_regbits_omap2 = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = 12,
	.sidle_shift = 3,
	.clkact_shift = 8,
	.emufree_shift = 5,
	.enwkup_shift = 2,
	.srst_shift = 1,
	.autoidle_shift = 0,
};

static const struct sysc_capabilities sysc_omap2 = {
	.type = TI_SYSC_OMAP2,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
};

/* All omap2 and 3 timers, and timers 1, 2 & 10 on omap 4 and 5 */
static const struct sysc_capabilities sysc_omap2_timer = {
	.type = TI_SYSC_OMAP2_TIMER,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT,
};

/*
 * SHAM2 (SHA1/MD5) sysc found on omap3, a variant of sysc_regbits_omap2
 * with different sidle position
 */
static const struct sysc_regbits sysc_regbits_omap3_sham = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 4,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap3_sham = {
	.type = TI_SYSC_OMAP3_SHAM,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_sham,
};

/*
 * AES register bits found on omap3 and later, a variant of
 * sysc_regbits_omap2 with different sidle position
 */
static const struct sysc_regbits sysc_regbits_omap3_aes = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 6,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap3_aes = {
	.type = TI_SYSC_OMAP3_AES,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_aes,
};

/*
 * Common sysc register bits found on omap4, also known as type2
 */
static const struct sysc_regbits sysc_regbits_omap4 = {
	.dmadisable_shift = 16,
	.midle_shift = 4,
	.sidle_shift = 2,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.emufree_shift = 1,
	.srst_shift = 0,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4 = {
	.type = TI_SYSC_OMAP4,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};

static const struct sysc_capabilities sysc_omap4_timer = {
	.type = TI_SYSC_OMAP4_TIMER,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};

/*
 * Common sysc register bits found on omap4, also known as type3
 */
static const struct sysc_regbits sysc_regbits_omap4_simple = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = 2,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_simple = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
};

/*
 * SmartReflex sysc found on omap34xx
 */
static const struct sysc_regbits sysc_regbits_omap34xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = 20,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_34xx_sr = {
	.type = TI_SYSC_OMAP34XX_SR,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
	.regbits = &sysc_regbits_omap34xx_sr,
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
		      SYSC_QUIRK_LEGACY_IDLE,
};
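/*
 * Note on the regbits tables above and below: a shift value of -ENODEV
 * means the module type does not implement that SYSCONFIG field, and the
 * driver skips it, see for example the srst_shift < 0 checks in the
 * softreset handling above.
 */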
/*
 * SmartReflex sysc found on omap36xx and later
 */
static const struct sysc_regbits sysc_regbits_omap36xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_36xx_sr = {
	.type = TI_SYSC_OMAP36XX_SR,
	.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
};

static const struct sysc_capabilities sysc_omap4_sr = {
	.type = TI_SYSC_OMAP4_SR,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};

/*
 * McASP register bits found on omap4 and later
 */
static const struct sysc_regbits sysc_regbits_omap4_mcasp = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_mcasp = {
	.type = TI_SYSC_OMAP4_MCASP,
	.regbits = &sysc_regbits_omap4_mcasp,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};

/*
 * McASP found on dra7 and later
 */
static const struct sysc_capabilities sysc_dra7_mcasp = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};

/*
 * FS USB host found on omap4 and later
 */
static const struct sysc_regbits sysc_regbits_omap4_usb_host_fs = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
	.type = TI_SYSC_OMAP4_USB_HOST_FS,
	.sysc_mask = SYSC_OMAP2_ENAWAKEUP,
	.regbits = &sysc_regbits_omap4_usb_host_fs,
};

static const struct sysc_regbits sysc_regbits_dra7_mcan = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 4,
	.srst_shift = 0,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_dra7_mcan = {
	.type = TI_SYSC_DRA7_MCAN,
	.sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_dra7_mcan,
	.mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};

/*
 * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
 */
static const struct sysc_capabilities sysc_pruss = {
	.type = TI_SYSC_PRUSS,
	.sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
};
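/*
 * Note: the capability structs above are bound to their "ti,sysc-*"
 * compatible strings in the sysc_match[] of_device_id table at the end
 * of this file, and any mod_quirks are merged into ddata->cfg.quirks by
 * sysc_init_match() below.
 */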
static int sysc_init_pdata(struct sysc *ddata)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
	struct ti_sysc_module_data *mdata;

	if (!pdata)
		return 0;

	mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;

	if (ddata->legacy_mode) {
		mdata->name = ddata->legacy_mode;
		mdata->module_pa = ddata->module_pa;
		mdata->module_size = ddata->module_size;
		mdata->offsets = ddata->offsets;
		mdata->nr_offsets = SYSC_MAX_REGS;
		mdata->cap = ddata->cap;
		mdata->cfg = &ddata->cfg;
	}

	ddata->mdata = mdata;

	return 0;
}

static int sysc_init_match(struct sysc *ddata)
{
	const struct sysc_capabilities *cap;

	cap = of_device_get_match_data(ddata->dev);
	if (!cap)
		return -EINVAL;

	ddata->cap = cap;
	if (ddata->cap)
		ddata->cfg.quirks |= ddata->cap->mod_quirks;

	return 0;
}

static void ti_sysc_idle(struct work_struct *work)
{
	struct sysc *ddata;

	ddata = container_of(work, struct sysc, idle_work.work);

	/*
	 * One time decrement of clock usage counts if left on from init.
	 * Note that we disable opt clocks unconditionally in this case
	 * as they are enabled unconditionally during init without
	 * considering sysc_opt_clks_needed() at that point.
	 */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT)) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	/* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
	if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
		return;

	/*
	 * Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
	 * and SYSC_QUIRK_NO_RESET_ON_INIT
	 */
	if (pm_runtime_active(ddata->dev))
		pm_runtime_put_sync(ddata->dev);
}

/*
 * SoC model and features detection. Only needed for SoCs that need
 * special handling for quirks, no need to list others.
 */
static const struct soc_device_attribute sysc_soc_match[] = {
	SOC_FLAG("OMAP242*", SOC_2420),
	SOC_FLAG("OMAP243*", SOC_2430),
	SOC_FLAG("OMAP3[45]*", SOC_3430),
	SOC_FLAG("OMAP3[67]*", SOC_3630),
	SOC_FLAG("OMAP443*", SOC_4430),
	SOC_FLAG("OMAP446*", SOC_4460),
	SOC_FLAG("OMAP447*", SOC_4470),
	SOC_FLAG("OMAP54*", SOC_5430),
	SOC_FLAG("AM433", SOC_AM3),
	SOC_FLAG("AM43*", SOC_AM4),
	SOC_FLAG("DRA7*", SOC_DRA7),

	{ /* sentinel */ },
};
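/*
 * Note: the .machine strings above are glob patterns matched by
 * soc_device_match() against the booted SoC name, so for example
 * "OMAP3[45]*" covers both omap34xx and omap35xx variants.
 */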
/*
 * List of SoCs variants with disabled features. By default we assume all
 * devices in the device tree are available so no need to list those SoCs.
 */
static const struct soc_device_attribute sysc_soc_feat_match[] = {
	/* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
	SOC_FLAG("AM3505", DIS_SGX),
	SOC_FLAG("OMAP3525", DIS_SGX),
	SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),

	/* OMAP3630/DM3730 variants with some accelerators disabled */
	SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
	SOC_FLAG("DM3725", DIS_SGX),
	SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
	SOC_FLAG("OMAP3621", DIS_ISP),

	{ /* sentinel */ },
};

static int sysc_add_disabled(unsigned long base)
{
	struct sysc_address *disabled_module;

	disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
	if (!disabled_module)
		return -ENOMEM;

	disabled_module->base = base;

	mutex_lock(&sysc_soc->list_lock);
	list_add(&disabled_module->node, &sysc_soc->disabled_modules);
	mutex_unlock(&sysc_soc->list_lock);

	return 0;
}

/*
 * One time init to detect the booted SoC and disable unavailable features.
 * Note that we initialize static data shared across all ti-sysc instances
 * so ddata is only used for SoC type. This can be called from module_init
 * once we no longer need to rely on platform data.
 */
static int sysc_init_soc(struct sysc *ddata)
{
	const struct soc_device_attribute *match;
	struct ti_sysc_platform_data *pdata;
	unsigned long features = 0;

	if (sysc_soc)
		return 0;

	sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
	if (!sysc_soc)
		return -ENOMEM;

	mutex_init(&sysc_soc->list_lock);
	INIT_LIST_HEAD(&sysc_soc->disabled_modules);
	sysc_soc->general_purpose = true;

	pdata = dev_get_platdata(ddata->dev);
	if (pdata && pdata->soc_type_gp)
		sysc_soc->general_purpose = pdata->soc_type_gp();

	match = soc_device_match(sysc_soc_match);
	if (match && match->data)
		sysc_soc->soc = (int)match->data;

	/* Ignore devices that are not available on HS and EMU SoCs */
	if (!sysc_soc->general_purpose) {
		switch (sysc_soc->soc) {
		case SOC_3430 ... SOC_3630:
			sysc_add_disabled(0x48304000);	/* timer12 */
			break;
		default:
			break;
		}
	}

	match = soc_device_match(sysc_soc_feat_match);
	if (!match)
		return 0;

	if (match->data)
		features = (unsigned long)match->data;

	/*
	 * Add disabled devices to the list based on the module base.
	 * Note that this must be done before we attempt to access the
	 * device and have module revision checks working.
	 */
	if (features & DIS_ISP)
		sysc_add_disabled(0x480bd400);
	if (features & DIS_IVA)
		sysc_add_disabled(0x5d000000);
	if (features & DIS_SGX)
		sysc_add_disabled(0x50000000);

	return 0;
}

static void sysc_cleanup_soc(void)
{
	struct sysc_address *disabled_module;
	struct list_head *pos, *tmp;

	if (!sysc_soc)
		return;

	mutex_lock(&sysc_soc->list_lock);
	list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
		disabled_module = list_entry(pos, struct sysc_address, node);
		list_del(pos);
		kfree(disabled_module);
	}
	mutex_unlock(&sysc_soc->list_lock);
}

static int sysc_check_disabled_devices(struct sysc *ddata)
{
	struct sysc_address *disabled_module;
	struct list_head *pos;
	int error = 0;

	mutex_lock(&sysc_soc->list_lock);
	list_for_each(pos, &sysc_soc->disabled_modules) {
		disabled_module = list_entry(pos, struct sysc_address, node);
		if (ddata->module_pa == disabled_module->base) {
			dev_dbg(ddata->dev, "module disabled for this SoC\n");
			error = -ENODEV;
			break;
		}
	}
	mutex_unlock(&sysc_soc->list_lock);

	return error;
}

/*
 * Ignore timers tagged with no-reset and no-idle. These are likely in use,
 * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
 * are needed, we could also look at the timer register configuration.
 */
static int sysc_check_active_timer(struct sysc *ddata)
{
	if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
	    ddata->cap->type != TI_SYSC_OMAP4_TIMER)
		return 0;

	if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
	    (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
		return -ENXIO;

	return 0;
}

static const struct of_device_id sysc_match_table[] = {
	{ .compatible = "simple-bus", },
	{ /* sentinel */ },
};
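/*
 * Note: sysc_match_table above is passed to of_platform_populate() in
 * probe below, so in addition to the direct child nodes, the children of
 * any "simple-bus" child node are also populated under the interconnect
 * target module device.
 */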
static int sysc_probe(struct platform_device *pdev)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct sysc *ddata;
	int error;

	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	ddata->dev = &pdev->dev;
	platform_set_drvdata(pdev, ddata);

	error = sysc_init_soc(ddata);
	if (error)
		return error;

	error = sysc_init_match(ddata);
	if (error)
		return error;

	error = sysc_init_dts_quirks(ddata);
	if (error)
		return error;

	error = sysc_map_and_check_registers(ddata);
	if (error)
		return error;

	error = sysc_init_sysc_mask(ddata);
	if (error)
		return error;

	error = sysc_init_idlemodes(ddata);
	if (error)
		return error;

	error = sysc_init_syss_mask(ddata);
	if (error)
		return error;

	error = sysc_init_pdata(ddata);
	if (error)
		return error;

	sysc_init_early_quirks(ddata);

	error = sysc_check_disabled_devices(ddata);
	if (error)
		return error;

	error = sysc_check_active_timer(ddata);
	if (error)
		return error;

	error = sysc_get_clocks(ddata);
	if (error)
		return error;

	error = sysc_init_resets(ddata);
	if (error)
		goto unprepare;

	error = sysc_init_module(ddata);
	if (error)
		goto unprepare;

	pm_runtime_enable(ddata->dev);
	error = pm_runtime_get_sync(ddata->dev);
	if (error < 0) {
		pm_runtime_put_noidle(ddata->dev);
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	/* Balance use counts as PM runtime should have enabled these all */
	if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
		reset_control_assert(ddata->rsts);

	if (!(ddata->cfg.quirks &
	      (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	sysc_show_registers(ddata);

	ddata->dev->type = &sysc_device_type;
	error = of_platform_populate(ddata->dev->of_node, sysc_match_table,
				     pdata ? pdata->auxdata : NULL,
				     ddata->dev);
	if (error)
		goto err;

	INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);

	/* At least earlycon won't survive without deferred idle */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT |
				 SYSC_QUIRK_NO_RESET_ON_INIT)) {
		schedule_delayed_work(&ddata->idle_work, 3000);
	} else {
		pm_runtime_put(&pdev->dev);
	}

	return 0;

err:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
unprepare:
	sysc_unprepare(ddata);

	return error;
}

static int sysc_remove(struct platform_device *pdev)
{
	struct sysc *ddata = platform_get_drvdata(pdev);
	int error;

	cancel_delayed_work_sync(&ddata->idle_work);

	error = pm_runtime_get_sync(ddata->dev);
	if (error < 0) {
		pm_runtime_put_noidle(ddata->dev);
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	of_platform_depopulate(&pdev->dev);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(ddata->rsts);

unprepare:
	sysc_unprepare(ddata);

	return 0;
}

static const struct of_device_id sysc_match[] = {
	{ .compatible = "ti,sysc-omap2", .data = &sysc_omap2, },
	{ .compatible = "ti,sysc-omap2-timer", .data = &sysc_omap2_timer, },
	{ .compatible = "ti,sysc-omap4", .data = &sysc_omap4, },
	{ .compatible = "ti,sysc-omap4-timer", .data = &sysc_omap4_timer, },
	{ .compatible = "ti,sysc-omap4-simple", .data = &sysc_omap4_simple, },
	{ .compatible = "ti,sysc-omap3430-sr", .data = &sysc_34xx_sr, },
	{ .compatible = "ti,sysc-omap3630-sr", .data = &sysc_36xx_sr, },
	{ .compatible = "ti,sysc-omap4-sr", .data = &sysc_omap4_sr, },
	{ .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, },
	{ .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, },
	{ .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
	{ .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, },
	{ .compatible = "ti,sysc-usb-host-fs",
	  .data = &sysc_omap4_usb_host_fs, },
	{ .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
	{ .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
	{ },
};
MODULE_DEVICE_TABLE(of, sysc_match);

static struct platform_driver sysc_driver = {
	.probe = sysc_probe,
	.remove = sysc_remove,
	.driver = {
		.name = "ti-sysc",
		.of_match_table = sysc_match,
		.pm = &sysc_pm_ops,
	},
};

static int __init sysc_init(void)
{
	bus_register_notifier(&platform_bus_type, &sysc_nb);

	return platform_driver_register(&sysc_driver);
}
module_init(sysc_init);
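/*
 * Note: the bus notifier is registered before the platform driver so
 * that BUS_NOTIFY_ADD_DEVICE is already seen for the child devices
 * created by of_platform_populate() during probe.
 */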
static void __exit sysc_exit(void)
{
	bus_unregister_notifier(&platform_bus_type, &sysc_nb);
	platform_driver_unregister(&sysc_driver);
	sysc_cleanup_soc();
}
module_exit(sysc_exit);

MODULE_DESCRIPTION("TI sysc interconnect target driver");
MODULE_LICENSE("GPL v2");