1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * ti-sysc.c - Texas Instruments sysc interconnect target driver 4 */ 5 6 #include <linux/io.h> 7 #include <linux/clk.h> 8 #include <linux/clkdev.h> 9 #include <linux/delay.h> 10 #include <linux/list.h> 11 #include <linux/module.h> 12 #include <linux/platform_device.h> 13 #include <linux/pm_domain.h> 14 #include <linux/pm_runtime.h> 15 #include <linux/reset.h> 16 #include <linux/of_address.h> 17 #include <linux/of_platform.h> 18 #include <linux/slab.h> 19 #include <linux/sys_soc.h> 20 #include <linux/iopoll.h> 21 22 #include <linux/platform_data/ti-sysc.h> 23 24 #include <dt-bindings/bus/ti-sysc.h> 25 26 #define DIS_ISP BIT(2) 27 #define DIS_IVA BIT(1) 28 #define DIS_SGX BIT(0) 29 30 #define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), } 31 32 #define MAX_MODULE_SOFTRESET_WAIT 10000 33 34 enum sysc_soc { 35 SOC_UNKNOWN, 36 SOC_2420, 37 SOC_2430, 38 SOC_3430, 39 SOC_3630, 40 SOC_4430, 41 SOC_4460, 42 SOC_4470, 43 SOC_5430, 44 SOC_AM3, 45 SOC_AM4, 46 SOC_DRA7, 47 }; 48 49 struct sysc_address { 50 unsigned long base; 51 struct list_head node; 52 }; 53 54 struct sysc_soc_info { 55 unsigned long general_purpose:1; 56 enum sysc_soc soc; 57 struct mutex list_lock; /* disabled modules list lock */ 58 struct list_head disabled_modules; 59 }; 60 61 enum sysc_clocks { 62 SYSC_FCK, 63 SYSC_ICK, 64 SYSC_OPTFCK0, 65 SYSC_OPTFCK1, 66 SYSC_OPTFCK2, 67 SYSC_OPTFCK3, 68 SYSC_OPTFCK4, 69 SYSC_OPTFCK5, 70 SYSC_OPTFCK6, 71 SYSC_OPTFCK7, 72 SYSC_MAX_CLOCKS, 73 }; 74 75 static struct sysc_soc_info *sysc_soc; 76 static const char * const reg_names[] = { "rev", "sysc", "syss", }; 77 static const char * const clock_names[SYSC_MAX_CLOCKS] = { 78 "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4", 79 "opt5", "opt6", "opt7", 80 }; 81 82 #define SYSC_IDLEMODE_MASK 3 83 #define SYSC_CLOCKACTIVITY_MASK 3 84 85 /** 86 * struct sysc - TI sysc interconnect target module registers and capabilities 87 * @dev: struct device pointer 88 * @module_pa: physical address of the interconnect target module 89 * @module_size: size of the interconnect target module 90 * @module_va: virtual address of the interconnect target module 91 * @offsets: register offsets from module base 92 * @mdata: ti-sysc to hwmod translation data for a module 93 * @clocks: clocks used by the interconnect target module 94 * @clock_roles: clock role names for the found clocks 95 * @nr_clocks: number of clocks used by the interconnect target module 96 * @rsts: resets used by the interconnect target module 97 * @legacy_mode: configured for legacy mode if set 98 * @cap: interconnect target module capabilities 99 * @cfg: interconnect target module configuration 100 * @cookie: data used by legacy platform callbacks 101 * @name: name if available 102 * @revision: interconnect target module revision 103 * @enabled: sysc runtime enabled status 104 * @needs_resume: runtime resume needed on resume from suspend 105 * @child_needs_resume: runtime resume needed for child on resume from suspend 106 * @disable_on_idle: status flag used for disabling modules with resets 107 * @idle_work: work structure used to perform delayed idle on a module 108 * @pre_reset_quirk: module specific pre-reset quirk 109 * @post_reset_quirk: module specific post-reset quirk 110 * @reset_done_quirk: module specific reset done quirk 111 * @module_enable_quirk: module specific enable quirk 112 * @module_disable_quirk: module specific disable quirk 113 * @module_unlock_quirk: module specific sysconfig unlock quirk 114 * 
@module_lock_quirk: module specific sysconfig lock quirk 115 */ 116 struct sysc { 117 struct device *dev; 118 u64 module_pa; 119 u32 module_size; 120 void __iomem *module_va; 121 int offsets[SYSC_MAX_REGS]; 122 struct ti_sysc_module_data *mdata; 123 struct clk **clocks; 124 const char **clock_roles; 125 int nr_clocks; 126 struct reset_control *rsts; 127 const char *legacy_mode; 128 const struct sysc_capabilities *cap; 129 struct sysc_config cfg; 130 struct ti_sysc_cookie cookie; 131 const char *name; 132 u32 revision; 133 unsigned int enabled:1; 134 unsigned int needs_resume:1; 135 unsigned int child_needs_resume:1; 136 struct delayed_work idle_work; 137 void (*pre_reset_quirk)(struct sysc *sysc); 138 void (*post_reset_quirk)(struct sysc *sysc); 139 void (*reset_done_quirk)(struct sysc *sysc); 140 void (*module_enable_quirk)(struct sysc *sysc); 141 void (*module_disable_quirk)(struct sysc *sysc); 142 void (*module_unlock_quirk)(struct sysc *sysc); 143 void (*module_lock_quirk)(struct sysc *sysc); 144 }; 145 146 static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, 147 bool is_child); 148 149 static void sysc_write(struct sysc *ddata, int offset, u32 value) 150 { 151 if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) { 152 writew_relaxed(value & 0xffff, ddata->module_va + offset); 153 154 /* Only i2c revision has LO and HI register with stride of 4 */ 155 if (ddata->offsets[SYSC_REVISION] >= 0 && 156 offset == ddata->offsets[SYSC_REVISION]) { 157 u16 hi = value >> 16; 158 159 writew_relaxed(hi, ddata->module_va + offset + 4); 160 } 161 162 return; 163 } 164 165 writel_relaxed(value, ddata->module_va + offset); 166 } 167 168 static u32 sysc_read(struct sysc *ddata, int offset) 169 { 170 if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) { 171 u32 val; 172 173 val = readw_relaxed(ddata->module_va + offset); 174 175 /* Only i2c revision has LO and HI register with stride of 4 */ 176 if (ddata->offsets[SYSC_REVISION] >= 0 && 177 offset == ddata->offsets[SYSC_REVISION]) { 178 u16 tmp = readw_relaxed(ddata->module_va + offset + 4); 179 180 val |= tmp << 16; 181 } 182 183 return val; 184 } 185 186 return readl_relaxed(ddata->module_va + offset); 187 } 188 189 static bool sysc_opt_clks_needed(struct sysc *ddata) 190 { 191 return !!(ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_NEEDED); 192 } 193 194 static u32 sysc_read_revision(struct sysc *ddata) 195 { 196 int offset = ddata->offsets[SYSC_REVISION]; 197 198 if (offset < 0) 199 return 0; 200 201 return sysc_read(ddata, offset); 202 } 203 204 static u32 sysc_read_sysconfig(struct sysc *ddata) 205 { 206 int offset = ddata->offsets[SYSC_SYSCONFIG]; 207 208 if (offset < 0) 209 return 0; 210 211 return sysc_read(ddata, offset); 212 } 213 214 static u32 sysc_read_sysstatus(struct sysc *ddata) 215 { 216 int offset = ddata->offsets[SYSC_SYSSTATUS]; 217 218 if (offset < 0) 219 return 0; 220 221 return sysc_read(ddata, offset); 222 } 223 224 static int sysc_add_named_clock_from_child(struct sysc *ddata, 225 const char *name, 226 const char *optfck_name) 227 { 228 struct device_node *np = ddata->dev->of_node; 229 struct device_node *child; 230 struct clk_lookup *cl; 231 struct clk *clock; 232 const char *n; 233 234 if (name) 235 n = name; 236 else 237 n = optfck_name; 238 239 /* Does the clock alias already exist? 
*/ 240 clock = of_clk_get_by_name(np, n); 241 if (!IS_ERR(clock)) { 242 clk_put(clock); 243 244 return 0; 245 } 246 247 child = of_get_next_available_child(np, NULL); 248 if (!child) 249 return -ENODEV; 250 251 clock = devm_get_clk_from_child(ddata->dev, child, name); 252 if (IS_ERR(clock)) 253 return PTR_ERR(clock); 254 255 /* 256 * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID 257 * limit for clk_get(). If cl ever needs to be freed, it should be done 258 * with clkdev_drop(). 259 */ 260 cl = kcalloc(1, sizeof(*cl), GFP_KERNEL); 261 if (!cl) 262 return -ENOMEM; 263 264 cl->con_id = n; 265 cl->dev_id = dev_name(ddata->dev); 266 cl->clk = clock; 267 clkdev_add(cl); 268 269 clk_put(clock); 270 271 return 0; 272 } 273 274 static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name) 275 { 276 const char *optfck_name; 277 int error, index; 278 279 if (ddata->nr_clocks < SYSC_OPTFCK0) 280 index = SYSC_OPTFCK0; 281 else 282 index = ddata->nr_clocks; 283 284 if (name) 285 optfck_name = name; 286 else 287 optfck_name = clock_names[index]; 288 289 error = sysc_add_named_clock_from_child(ddata, name, optfck_name); 290 if (error) 291 return error; 292 293 ddata->clock_roles[index] = optfck_name; 294 ddata->nr_clocks++; 295 296 return 0; 297 } 298 299 static int sysc_get_one_clock(struct sysc *ddata, const char *name) 300 { 301 int error, i, index = -ENODEV; 302 303 if (!strncmp(clock_names[SYSC_FCK], name, 3)) 304 index = SYSC_FCK; 305 else if (!strncmp(clock_names[SYSC_ICK], name, 3)) 306 index = SYSC_ICK; 307 308 if (index < 0) { 309 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { 310 if (!ddata->clocks[i]) { 311 index = i; 312 break; 313 } 314 } 315 } 316 317 if (index < 0) { 318 dev_err(ddata->dev, "clock %s not added\n", name); 319 return index; 320 } 321 322 ddata->clocks[index] = devm_clk_get(ddata->dev, name); 323 if (IS_ERR(ddata->clocks[index])) { 324 dev_err(ddata->dev, "clock get error for %s: %li\n", 325 name, PTR_ERR(ddata->clocks[index])); 326 327 return PTR_ERR(ddata->clocks[index]); 328 } 329 330 error = clk_prepare(ddata->clocks[index]); 331 if (error) { 332 dev_err(ddata->dev, "clock prepare error for %s: %i\n", 333 name, error); 334 335 return error; 336 } 337 338 return 0; 339 } 340 341 static int sysc_get_clocks(struct sysc *ddata) 342 { 343 struct device_node *np = ddata->dev->of_node; 344 struct property *prop; 345 const char *name; 346 int nr_fck = 0, nr_ick = 0, i, error = 0; 347 348 ddata->clock_roles = devm_kcalloc(ddata->dev, 349 SYSC_MAX_CLOCKS, 350 sizeof(*ddata->clock_roles), 351 GFP_KERNEL); 352 if (!ddata->clock_roles) 353 return -ENOMEM; 354 355 of_property_for_each_string(np, "clock-names", prop, name) { 356 if (!strncmp(clock_names[SYSC_FCK], name, 3)) 357 nr_fck++; 358 if (!strncmp(clock_names[SYSC_ICK], name, 3)) 359 nr_ick++; 360 ddata->clock_roles[ddata->nr_clocks] = name; 361 ddata->nr_clocks++; 362 } 363 364 if (ddata->nr_clocks < 1) 365 return 0; 366 367 if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) { 368 error = sysc_init_ext_opt_clock(ddata, NULL); 369 if (error) 370 return error; 371 } 372 373 if (ddata->nr_clocks > SYSC_MAX_CLOCKS) { 374 dev_err(ddata->dev, "too many clocks for %pOF\n", np); 375 376 return -EINVAL; 377 } 378 379 if (nr_fck > 1 || nr_ick > 1) { 380 dev_err(ddata->dev, "max one fck and ick for %pOF\n", np); 381 382 return -EINVAL; 383 } 384 385 /* Always add a slot for main clocks fck and ick even if unused */ 386 if (!nr_fck) 387 ddata->nr_clocks++; 388 if (!nr_ick) 389 ddata->nr_clocks++; 390 391 
ddata->clocks = devm_kcalloc(ddata->dev, 392 ddata->nr_clocks, sizeof(*ddata->clocks), 393 GFP_KERNEL); 394 if (!ddata->clocks) 395 return -ENOMEM; 396 397 for (i = 0; i < SYSC_MAX_CLOCKS; i++) { 398 const char *name = ddata->clock_roles[i]; 399 400 if (!name) 401 continue; 402 403 error = sysc_get_one_clock(ddata, name); 404 if (error) 405 return error; 406 } 407 408 return 0; 409 } 410 411 static int sysc_enable_main_clocks(struct sysc *ddata) 412 { 413 struct clk *clock; 414 int i, error; 415 416 if (!ddata->clocks) 417 return 0; 418 419 for (i = 0; i < SYSC_OPTFCK0; i++) { 420 clock = ddata->clocks[i]; 421 422 /* Main clocks may not have ick */ 423 if (IS_ERR_OR_NULL(clock)) 424 continue; 425 426 error = clk_enable(clock); 427 if (error) 428 goto err_disable; 429 } 430 431 return 0; 432 433 err_disable: 434 for (i--; i >= 0; i--) { 435 clock = ddata->clocks[i]; 436 437 /* Main clocks may not have ick */ 438 if (IS_ERR_OR_NULL(clock)) 439 continue; 440 441 clk_disable(clock); 442 } 443 444 return error; 445 } 446 447 static void sysc_disable_main_clocks(struct sysc *ddata) 448 { 449 struct clk *clock; 450 int i; 451 452 if (!ddata->clocks) 453 return; 454 455 for (i = 0; i < SYSC_OPTFCK0; i++) { 456 clock = ddata->clocks[i]; 457 if (IS_ERR_OR_NULL(clock)) 458 continue; 459 460 clk_disable(clock); 461 } 462 } 463 464 static int sysc_enable_opt_clocks(struct sysc *ddata) 465 { 466 struct clk *clock; 467 int i, error; 468 469 if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) 470 return 0; 471 472 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { 473 clock = ddata->clocks[i]; 474 475 /* Assume no holes for opt clocks */ 476 if (IS_ERR_OR_NULL(clock)) 477 return 0; 478 479 error = clk_enable(clock); 480 if (error) 481 goto err_disable; 482 } 483 484 return 0; 485 486 err_disable: 487 for (i--; i >= 0; i--) { 488 clock = ddata->clocks[i]; 489 if (IS_ERR_OR_NULL(clock)) 490 continue; 491 492 clk_disable(clock); 493 } 494 495 return error; 496 } 497 498 static void sysc_disable_opt_clocks(struct sysc *ddata) 499 { 500 struct clk *clock; 501 int i; 502 503 if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) 504 return; 505 506 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { 507 clock = ddata->clocks[i]; 508 509 /* Assume no holes for opt clocks */ 510 if (IS_ERR_OR_NULL(clock)) 511 return; 512 513 clk_disable(clock); 514 } 515 } 516 517 static void sysc_clkdm_deny_idle(struct sysc *ddata) 518 { 519 struct ti_sysc_platform_data *pdata; 520 521 if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO)) 522 return; 523 524 pdata = dev_get_platdata(ddata->dev); 525 if (pdata && pdata->clkdm_deny_idle) 526 pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie); 527 } 528 529 static void sysc_clkdm_allow_idle(struct sysc *ddata) 530 { 531 struct ti_sysc_platform_data *pdata; 532 533 if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO)) 534 return; 535 536 pdata = dev_get_platdata(ddata->dev); 537 if (pdata && pdata->clkdm_allow_idle) 538 pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie); 539 } 540 541 /** 542 * sysc_init_resets - init rstctrl reset line if configured 543 * @ddata: device driver data 544 * 545 * See sysc_rstctrl_reset_deassert(). 
546 */ 547 static int sysc_init_resets(struct sysc *ddata) 548 { 549 ddata->rsts = 550 devm_reset_control_get_optional_shared(ddata->dev, "rstctrl"); 551 552 return PTR_ERR_OR_ZERO(ddata->rsts); 553 } 554 555 /** 556 * sysc_parse_and_check_child_range - parses module IO region from ranges 557 * @ddata: device driver data 558 * 559 * In general we only need rev, syss, and sysc registers and not the whole 560 * module range. But we do want the offsets for these registers from the 561 * module base. This allows us to check them against the legacy hwmod 562 * platform data. Let's also check the ranges are configured properly. 563 */ 564 static int sysc_parse_and_check_child_range(struct sysc *ddata) 565 { 566 struct device_node *np = ddata->dev->of_node; 567 const __be32 *ranges; 568 u32 nr_addr, nr_size; 569 int len, error; 570 571 ranges = of_get_property(np, "ranges", &len); 572 if (!ranges) { 573 dev_err(ddata->dev, "missing ranges for %pOF\n", np); 574 575 return -ENOENT; 576 } 577 578 len /= sizeof(*ranges); 579 580 if (len < 3) { 581 dev_err(ddata->dev, "incomplete ranges for %pOF\n", np); 582 583 return -EINVAL; 584 } 585 586 error = of_property_read_u32(np, "#address-cells", &nr_addr); 587 if (error) 588 return -ENOENT; 589 590 error = of_property_read_u32(np, "#size-cells", &nr_size); 591 if (error) 592 return -ENOENT; 593 594 if (nr_addr != 1 || nr_size != 1) { 595 dev_err(ddata->dev, "invalid ranges for %pOF\n", np); 596 597 return -EINVAL; 598 } 599 600 ranges++; 601 ddata->module_pa = of_translate_address(np, ranges++); 602 ddata->module_size = be32_to_cpup(ranges); 603 604 return 0; 605 } 606 607 static struct device_node *stdout_path; 608 609 static void sysc_init_stdout_path(struct sysc *ddata) 610 { 611 struct device_node *np = NULL; 612 const char *uart; 613 614 if (IS_ERR(stdout_path)) 615 return; 616 617 if (stdout_path) 618 return; 619 620 np = of_find_node_by_path("/chosen"); 621 if (!np) 622 goto err; 623 624 uart = of_get_property(np, "stdout-path", NULL); 625 if (!uart) 626 goto err; 627 628 np = of_find_node_by_path(uart); 629 if (!np) 630 goto err; 631 632 stdout_path = np; 633 634 return; 635 636 err: 637 stdout_path = ERR_PTR(-ENODEV); 638 } 639 640 static void sysc_check_quirk_stdout(struct sysc *ddata, 641 struct device_node *np) 642 { 643 sysc_init_stdout_path(ddata); 644 if (np != stdout_path) 645 return; 646 647 ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT | 648 SYSC_QUIRK_NO_RESET_ON_INIT; 649 } 650 651 /** 652 * sysc_check_one_child - check child configuration 653 * @ddata: device driver data 654 * @np: child device node 655 * 656 * Let's avoid messy situations where we have new interconnect target 657 * node but children have "ti,hwmods". These belong to the interconnect 658 * target node and are managed by this driver. 659 */ 660 static void sysc_check_one_child(struct sysc *ddata, 661 struct device_node *np) 662 { 663 const char *name; 664 665 name = of_get_property(np, "ti,hwmods", NULL); 666 if (name && !of_device_is_compatible(np, "ti,sysc")) 667 dev_warn(ddata->dev, "really a child ti,hwmods property?"); 668 669 sysc_check_quirk_stdout(ddata, np); 670 sysc_parse_dts_quirks(ddata, np, true); 671 } 672 673 static void sysc_check_children(struct sysc *ddata) 674 { 675 struct device_node *child; 676 677 for_each_child_of_node(ddata->dev->of_node, child) 678 sysc_check_one_child(ddata, child); 679 } 680 681 /* 682 * So far only I2C uses 16-bit read access with clockactivity with revision 683 * in two registers with stride of 4. 
We can detect this based on the rev 684 * register size to configure things far enough to be able to properly read 685 * the revision register. 686 */ 687 static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res) 688 { 689 if (resource_size(res) == 8) 690 ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT; 691 } 692 693 /** 694 * sysc_parse_one - parses the interconnect target module registers 695 * @ddata: device driver data 696 * @reg: register to parse 697 */ 698 static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg) 699 { 700 struct resource *res; 701 const char *name; 702 703 switch (reg) { 704 case SYSC_REVISION: 705 case SYSC_SYSCONFIG: 706 case SYSC_SYSSTATUS: 707 name = reg_names[reg]; 708 break; 709 default: 710 return -EINVAL; 711 } 712 713 res = platform_get_resource_byname(to_platform_device(ddata->dev), 714 IORESOURCE_MEM, name); 715 if (!res) { 716 ddata->offsets[reg] = -ENODEV; 717 718 return 0; 719 } 720 721 ddata->offsets[reg] = res->start - ddata->module_pa; 722 if (reg == SYSC_REVISION) 723 sysc_check_quirk_16bit(ddata, res); 724 725 return 0; 726 } 727 728 static int sysc_parse_registers(struct sysc *ddata) 729 { 730 int i, error; 731 732 for (i = 0; i < SYSC_MAX_REGS; i++) { 733 error = sysc_parse_one(ddata, i); 734 if (error) 735 return error; 736 } 737 738 return 0; 739 } 740 741 /** 742 * sysc_check_registers - check for misconfigured register overlaps 743 * @ddata: device driver data 744 */ 745 static int sysc_check_registers(struct sysc *ddata) 746 { 747 int i, j, nr_regs = 0, nr_matches = 0; 748 749 for (i = 0; i < SYSC_MAX_REGS; i++) { 750 if (ddata->offsets[i] < 0) 751 continue; 752 753 if (ddata->offsets[i] > (ddata->module_size - 4)) { 754 dev_err(ddata->dev, "register outside module range"); 755 756 return -EINVAL; 757 } 758 759 for (j = 0; j < SYSC_MAX_REGS; j++) { 760 if (ddata->offsets[j] < 0) 761 continue; 762 763 if (ddata->offsets[i] == ddata->offsets[j]) 764 nr_matches++; 765 } 766 nr_regs++; 767 } 768 769 if (nr_matches > nr_regs) { 770 dev_err(ddata->dev, "overlapping registers: (%i/%i)", 771 nr_regs, nr_matches); 772 773 return -EINVAL; 774 } 775 776 return 0; 777 } 778 779 /** 780 * syc_ioremap - ioremap register space for the interconnect target module 781 * @ddata: device driver data 782 * 783 * Note that the interconnect target module registers can be anywhere 784 * within the interconnect target module range. For example, SGX has 785 * them at offset 0x1fc00 in the 32MB module address space. And cpsw 786 * has them at offset 0x1200 in the CPSW_WR child. Usually the 787 * the interconnect target module registers are at the beginning of 788 * the module range though. 
789 */ 790 static int sysc_ioremap(struct sysc *ddata) 791 { 792 int size; 793 794 if (ddata->offsets[SYSC_REVISION] < 0 && 795 ddata->offsets[SYSC_SYSCONFIG] < 0 && 796 ddata->offsets[SYSC_SYSSTATUS] < 0) { 797 size = ddata->module_size; 798 } else { 799 size = max3(ddata->offsets[SYSC_REVISION], 800 ddata->offsets[SYSC_SYSCONFIG], 801 ddata->offsets[SYSC_SYSSTATUS]); 802 803 if (size < SZ_1K) 804 size = SZ_1K; 805 806 if ((size + sizeof(u32)) > ddata->module_size) 807 size = ddata->module_size; 808 } 809 810 ddata->module_va = devm_ioremap(ddata->dev, 811 ddata->module_pa, 812 size + sizeof(u32)); 813 if (!ddata->module_va) 814 return -EIO; 815 816 return 0; 817 } 818 819 /** 820 * sysc_map_and_check_registers - ioremap and check device registers 821 * @ddata: device driver data 822 */ 823 static int sysc_map_and_check_registers(struct sysc *ddata) 824 { 825 int error; 826 827 error = sysc_parse_and_check_child_range(ddata); 828 if (error) 829 return error; 830 831 sysc_check_children(ddata); 832 833 error = sysc_parse_registers(ddata); 834 if (error) 835 return error; 836 837 error = sysc_ioremap(ddata); 838 if (error) 839 return error; 840 841 error = sysc_check_registers(ddata); 842 if (error) 843 return error; 844 845 return 0; 846 } 847 848 /** 849 * sysc_show_rev - read and show interconnect target module revision 850 * @bufp: buffer to print the information to 851 * @ddata: device driver data 852 */ 853 static int sysc_show_rev(char *bufp, struct sysc *ddata) 854 { 855 int len; 856 857 if (ddata->offsets[SYSC_REVISION] < 0) 858 return sprintf(bufp, ":NA"); 859 860 len = sprintf(bufp, ":%08x", ddata->revision); 861 862 return len; 863 } 864 865 static int sysc_show_reg(struct sysc *ddata, 866 char *bufp, enum sysc_registers reg) 867 { 868 if (ddata->offsets[reg] < 0) 869 return sprintf(bufp, ":NA"); 870 871 return sprintf(bufp, ":%x", ddata->offsets[reg]); 872 } 873 874 static int sysc_show_name(char *bufp, struct sysc *ddata) 875 { 876 if (!ddata->name) 877 return 0; 878 879 return sprintf(bufp, ":%s", ddata->name); 880 } 881 882 /** 883 * sysc_show_registers - show information about interconnect target module 884 * @ddata: device driver data 885 */ 886 static void sysc_show_registers(struct sysc *ddata) 887 { 888 char buf[128]; 889 char *bufp = buf; 890 int i; 891 892 for (i = 0; i < SYSC_MAX_REGS; i++) 893 bufp += sysc_show_reg(ddata, bufp, i); 894 895 bufp += sysc_show_rev(bufp, ddata); 896 bufp += sysc_show_name(bufp, ddata); 897 898 dev_dbg(ddata->dev, "%llx:%x%s\n", 899 ddata->module_pa, ddata->module_size, 900 buf); 901 } 902 903 /** 904 * sysc_write_sysconfig - handle sysconfig quirks for register write 905 * @ddata: device driver data 906 * @value: register value 907 */ 908 static void sysc_write_sysconfig(struct sysc *ddata, u32 value) 909 { 910 if (ddata->module_unlock_quirk) 911 ddata->module_unlock_quirk(ddata); 912 913 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value); 914 915 if (ddata->module_lock_quirk) 916 ddata->module_lock_quirk(ddata); 917 } 918 919 #define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1) 920 #define SYSC_CLOCACT_ICK 2 921 922 /* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */ 923 static int sysc_enable_module(struct device *dev) 924 { 925 struct sysc *ddata; 926 const struct sysc_regbits *regbits; 927 u32 reg, idlemodes, best_mode; 928 929 ddata = dev_get_drvdata(dev); 930 if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV) 931 return 0; 932 933 regbits = ddata->cap->regbits; 934 reg = sysc_read(ddata, 
ddata->offsets[SYSC_SYSCONFIG]); 935 936 /* Set CLOCKACTIVITY, we only use it for ick */ 937 if (regbits->clkact_shift >= 0 && 938 (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT || 939 ddata->cfg.sysc_val & BIT(regbits->clkact_shift))) 940 reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift; 941 942 /* Set SIDLE mode */ 943 idlemodes = ddata->cfg.sidlemodes; 944 if (!idlemodes || regbits->sidle_shift < 0) 945 goto set_midle; 946 947 if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE | 948 SYSC_QUIRK_SWSUP_SIDLE_ACT)) { 949 best_mode = SYSC_IDLE_NO; 950 } else { 951 best_mode = fls(ddata->cfg.sidlemodes) - 1; 952 if (best_mode > SYSC_IDLE_MASK) { 953 dev_err(dev, "%s: invalid sidlemode\n", __func__); 954 return -EINVAL; 955 } 956 957 /* Set WAKEUP */ 958 if (regbits->enwkup_shift >= 0 && 959 ddata->cfg.sysc_val & BIT(regbits->enwkup_shift)) 960 reg |= BIT(regbits->enwkup_shift); 961 } 962 963 reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); 964 reg |= best_mode << regbits->sidle_shift; 965 sysc_write_sysconfig(ddata, reg); 966 967 set_midle: 968 /* Set MIDLE mode */ 969 idlemodes = ddata->cfg.midlemodes; 970 if (!idlemodes || regbits->midle_shift < 0) 971 goto set_autoidle; 972 973 best_mode = fls(ddata->cfg.midlemodes) - 1; 974 if (best_mode > SYSC_IDLE_MASK) { 975 dev_err(dev, "%s: invalid midlemode\n", __func__); 976 return -EINVAL; 977 } 978 979 if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY) 980 best_mode = SYSC_IDLE_NO; 981 982 reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); 983 reg |= best_mode << regbits->midle_shift; 984 sysc_write_sysconfig(ddata, reg); 985 986 set_autoidle: 987 /* Autoidle bit must enabled separately if available */ 988 if (regbits->autoidle_shift >= 0 && 989 ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) { 990 reg |= 1 << regbits->autoidle_shift; 991 sysc_write_sysconfig(ddata, reg); 992 } 993 994 if (ddata->module_enable_quirk) 995 ddata->module_enable_quirk(ddata); 996 997 return 0; 998 } 999 1000 static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode) 1001 { 1002 if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP)) 1003 *best_mode = SYSC_IDLE_SMART_WKUP; 1004 else if (idlemodes & BIT(SYSC_IDLE_SMART)) 1005 *best_mode = SYSC_IDLE_SMART; 1006 else if (idlemodes & BIT(SYSC_IDLE_FORCE)) 1007 *best_mode = SYSC_IDLE_FORCE; 1008 else 1009 return -EINVAL; 1010 1011 return 0; 1012 } 1013 1014 /* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */ 1015 static int sysc_disable_module(struct device *dev) 1016 { 1017 struct sysc *ddata; 1018 const struct sysc_regbits *regbits; 1019 u32 reg, idlemodes, best_mode; 1020 int ret; 1021 1022 ddata = dev_get_drvdata(dev); 1023 if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV) 1024 return 0; 1025 1026 if (ddata->module_disable_quirk) 1027 ddata->module_disable_quirk(ddata); 1028 1029 regbits = ddata->cap->regbits; 1030 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); 1031 1032 /* Set MIDLE mode */ 1033 idlemodes = ddata->cfg.midlemodes; 1034 if (!idlemodes || regbits->midle_shift < 0) 1035 goto set_sidle; 1036 1037 ret = sysc_best_idle_mode(idlemodes, &best_mode); 1038 if (ret) { 1039 dev_err(dev, "%s: invalid midlemode\n", __func__); 1040 return ret; 1041 } 1042 1043 if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) || 1044 ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY)) 1045 best_mode = SYSC_IDLE_FORCE; 1046 1047 reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); 1048 reg |= best_mode << regbits->midle_shift; 1049 sysc_write_sysconfig(ddata, reg); 1050 1051 set_sidle: 1052 /* Set SIDLE mode */ 1053 
idlemodes = ddata->cfg.sidlemodes; 1054 if (!idlemodes || regbits->sidle_shift < 0) 1055 return 0; 1056 1057 if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) { 1058 best_mode = SYSC_IDLE_FORCE; 1059 } else { 1060 ret = sysc_best_idle_mode(idlemodes, &best_mode); 1061 if (ret) { 1062 dev_err(dev, "%s: invalid sidlemode\n", __func__); 1063 return ret; 1064 } 1065 } 1066 1067 reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); 1068 reg |= best_mode << regbits->sidle_shift; 1069 if (regbits->autoidle_shift >= 0 && 1070 ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) 1071 reg |= 1 << regbits->autoidle_shift; 1072 sysc_write_sysconfig(ddata, reg); 1073 1074 return 0; 1075 } 1076 1077 static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev, 1078 struct sysc *ddata) 1079 { 1080 struct ti_sysc_platform_data *pdata; 1081 int error; 1082 1083 pdata = dev_get_platdata(ddata->dev); 1084 if (!pdata) 1085 return 0; 1086 1087 if (!pdata->idle_module) 1088 return -ENODEV; 1089 1090 error = pdata->idle_module(dev, &ddata->cookie); 1091 if (error) 1092 dev_err(dev, "%s: could not idle: %i\n", 1093 __func__, error); 1094 1095 reset_control_assert(ddata->rsts); 1096 1097 return 0; 1098 } 1099 1100 static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev, 1101 struct sysc *ddata) 1102 { 1103 struct ti_sysc_platform_data *pdata; 1104 int error; 1105 1106 pdata = dev_get_platdata(ddata->dev); 1107 if (!pdata) 1108 return 0; 1109 1110 if (!pdata->enable_module) 1111 return -ENODEV; 1112 1113 error = pdata->enable_module(dev, &ddata->cookie); 1114 if (error) 1115 dev_err(dev, "%s: could not enable: %i\n", 1116 __func__, error); 1117 1118 reset_control_deassert(ddata->rsts); 1119 1120 return 0; 1121 } 1122 1123 static int __maybe_unused sysc_runtime_suspend(struct device *dev) 1124 { 1125 struct sysc *ddata; 1126 int error = 0; 1127 1128 ddata = dev_get_drvdata(dev); 1129 1130 if (!ddata->enabled) 1131 return 0; 1132 1133 sysc_clkdm_deny_idle(ddata); 1134 1135 if (ddata->legacy_mode) { 1136 error = sysc_runtime_suspend_legacy(dev, ddata); 1137 if (error) 1138 goto err_allow_idle; 1139 } else { 1140 error = sysc_disable_module(dev); 1141 if (error) 1142 goto err_allow_idle; 1143 } 1144 1145 sysc_disable_main_clocks(ddata); 1146 1147 if (sysc_opt_clks_needed(ddata)) 1148 sysc_disable_opt_clocks(ddata); 1149 1150 ddata->enabled = false; 1151 1152 err_allow_idle: 1153 reset_control_assert(ddata->rsts); 1154 1155 sysc_clkdm_allow_idle(ddata); 1156 1157 return error; 1158 } 1159 1160 static int __maybe_unused sysc_runtime_resume(struct device *dev) 1161 { 1162 struct sysc *ddata; 1163 int error = 0; 1164 1165 ddata = dev_get_drvdata(dev); 1166 1167 if (ddata->enabled) 1168 return 0; 1169 1170 1171 sysc_clkdm_deny_idle(ddata); 1172 1173 if (sysc_opt_clks_needed(ddata)) { 1174 error = sysc_enable_opt_clocks(ddata); 1175 if (error) 1176 goto err_allow_idle; 1177 } 1178 1179 error = sysc_enable_main_clocks(ddata); 1180 if (error) 1181 goto err_opt_clocks; 1182 1183 reset_control_deassert(ddata->rsts); 1184 1185 if (ddata->legacy_mode) { 1186 error = sysc_runtime_resume_legacy(dev, ddata); 1187 if (error) 1188 goto err_main_clocks; 1189 } else { 1190 error = sysc_enable_module(dev); 1191 if (error) 1192 goto err_main_clocks; 1193 } 1194 1195 ddata->enabled = true; 1196 1197 sysc_clkdm_allow_idle(ddata); 1198 1199 return 0; 1200 1201 err_main_clocks: 1202 sysc_disable_main_clocks(ddata); 1203 err_opt_clocks: 1204 if (sysc_opt_clks_needed(ddata)) 1205 sysc_disable_opt_clocks(ddata); 1206 
err_allow_idle: 1207 sysc_clkdm_allow_idle(ddata); 1208 1209 return error; 1210 } 1211 1212 static int __maybe_unused sysc_noirq_suspend(struct device *dev) 1213 { 1214 struct sysc *ddata; 1215 1216 ddata = dev_get_drvdata(dev); 1217 1218 if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) 1219 return 0; 1220 1221 return pm_runtime_force_suspend(dev); 1222 } 1223 1224 static int __maybe_unused sysc_noirq_resume(struct device *dev) 1225 { 1226 struct sysc *ddata; 1227 1228 ddata = dev_get_drvdata(dev); 1229 1230 if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) 1231 return 0; 1232 1233 return pm_runtime_force_resume(dev); 1234 } 1235 1236 static const struct dev_pm_ops sysc_pm_ops = { 1237 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_noirq_suspend, sysc_noirq_resume) 1238 SET_RUNTIME_PM_OPS(sysc_runtime_suspend, 1239 sysc_runtime_resume, 1240 NULL) 1241 }; 1242 1243 /* Module revision register based quirks */ 1244 struct sysc_revision_quirk { 1245 const char *name; 1246 u32 base; 1247 int rev_offset; 1248 int sysc_offset; 1249 int syss_offset; 1250 u32 revision; 1251 u32 revision_mask; 1252 u32 quirks; 1253 }; 1254 1255 #define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss, \ 1256 optrev_val, optrevmask, optquirkmask) \ 1257 { \ 1258 .name = (optname), \ 1259 .base = (optbase), \ 1260 .rev_offset = (optrev), \ 1261 .sysc_offset = (optsysc), \ 1262 .syss_offset = (optsyss), \ 1263 .revision = (optrev_val), \ 1264 .revision_mask = (optrevmask), \ 1265 .quirks = (optquirkmask), \ 1266 } 1267 1268 static const struct sysc_revision_quirk sysc_revision_quirks[] = { 1269 /* These drivers need to be fixed to not use pm_runtime_irq_safe() */ 1270 SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff, 1271 SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET), 1272 SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 1273 SYSC_QUIRK_LEGACY_IDLE), 1274 SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 1275 SYSC_QUIRK_LEGACY_IDLE), 1276 SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, 1277 SYSC_QUIRK_LEGACY_IDLE), 1278 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, 1279 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), 1280 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, 1281 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), 1282 /* Uarts on omap4 and later */ 1283 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff, 1284 SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), 1285 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff, 1286 SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), 1287 1288 /* Quirks that need to be set based on the module address */ 1289 SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, 1290 SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT | 1291 SYSC_QUIRK_SWSUP_SIDLE), 1292 1293 /* Quirks that need to be set based on detected module */ 1294 SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff, 1295 SYSC_MODULE_QUIRK_AESS), 1296 SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 1297 SYSC_QUIRK_CLKDM_NOAUTO), 1298 SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 1299 SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), 1300 SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff, 1301 SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), 1302 SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff, 1303 
SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), 1304 SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 1305 SYSC_QUIRK_CLKDM_NOAUTO), 1306 SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 1307 SYSC_QUIRK_CLKDM_NOAUTO), 1308 SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff, 1309 SYSC_QUIRK_OPT_CLKS_NEEDED), 1310 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff, 1311 SYSC_MODULE_QUIRK_HDQ1W), 1312 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff, 1313 SYSC_MODULE_QUIRK_HDQ1W), 1314 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff, 1315 SYSC_MODULE_QUIRK_I2C), 1316 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff, 1317 SYSC_MODULE_QUIRK_I2C), 1318 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff, 1319 SYSC_MODULE_QUIRK_I2C), 1320 SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0, 1321 SYSC_MODULE_QUIRK_I2C), 1322 SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0), 1323 SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 1324 SYSC_MODULE_QUIRK_SGX), 1325 SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff, 1326 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1327 SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0, 1328 SYSC_MODULE_QUIRK_RTC_UNLOCK), 1329 SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff, 1330 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1331 SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff, 1332 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1333 SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 1334 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1335 SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff, 1336 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1337 SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, 1338 SYSC_MODULE_QUIRK_WDT), 1339 /* PRUSS on am3, am4 and am5 */ 1340 SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000, 1341 SYSC_MODULE_QUIRK_PRUSS), 1342 /* Watchdog on am3 and am4 */ 1343 SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, 1344 SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE), 1345 1346 #ifdef DEBUG 1347 SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0), 1348 SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0), 1349 SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0), 1350 SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), 1351 SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, 1352 0xffff00f0, 0), 1353 SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0), 1354 SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0), 1355 SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), 1356 SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), 1357 SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0), 1358 SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0), 1359 SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), 1360 SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), 1361 SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), 1362 SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), 1363 SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 
0x500a0200, 0xffffffff, 0), 1364 SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1365 SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1366 SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0), 1367 SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0), 1368 SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0), 1369 SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0), 1370 SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0), 1371 SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0), 1372 SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0), 1373 SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0), 1374 SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0), 1375 SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0), 1376 SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0), 1377 SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0), 1378 SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0), 1379 SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0), 1380 SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0), 1381 SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0), 1382 SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0), 1383 SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0), 1384 SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0), 1385 SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0), 1386 SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1387 SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1388 SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), 1389 SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0), 1390 SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0), 1391 SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0), 1392 SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0), 1393 SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0), 1394 SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0), 1395 SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0), 1396 SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0), 1397 SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0), 1398 SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0), 1399 SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0), 1400 SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0), 1401 SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0), 1402 /* Some timers on omap4 and later */ 1403 SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0), 1404 SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0), 1405 SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0), 1406 SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0), 1407 SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0), 1408 SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0), 1409 SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0), 1410 SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), 1411 SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 
0x50700100, 0xffffffff, 0), 1412 SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0), 1413 SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0), 1414 SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0), 1415 #endif 1416 }; 1417 1418 /* 1419 * Early quirks based on module base and register offsets only that are 1420 * needed before the module revision can be read 1421 */ 1422 static void sysc_init_early_quirks(struct sysc *ddata) 1423 { 1424 const struct sysc_revision_quirk *q; 1425 int i; 1426 1427 for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) { 1428 q = &sysc_revision_quirks[i]; 1429 1430 if (!q->base) 1431 continue; 1432 1433 if (q->base != ddata->module_pa) 1434 continue; 1435 1436 if (q->rev_offset != ddata->offsets[SYSC_REVISION]) 1437 continue; 1438 1439 if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) 1440 continue; 1441 1442 if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) 1443 continue; 1444 1445 ddata->name = q->name; 1446 ddata->cfg.quirks |= q->quirks; 1447 } 1448 } 1449 1450 /* Quirks that also consider the revision register value */ 1451 static void sysc_init_revision_quirks(struct sysc *ddata) 1452 { 1453 const struct sysc_revision_quirk *q; 1454 int i; 1455 1456 for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) { 1457 q = &sysc_revision_quirks[i]; 1458 1459 if (q->base && q->base != ddata->module_pa) 1460 continue; 1461 1462 if (q->rev_offset != ddata->offsets[SYSC_REVISION]) 1463 continue; 1464 1465 if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) 1466 continue; 1467 1468 if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) 1469 continue; 1470 1471 if (q->revision == ddata->revision || 1472 (q->revision & q->revision_mask) == 1473 (ddata->revision & q->revision_mask)) { 1474 ddata->name = q->name; 1475 ddata->cfg.quirks |= q->quirks; 1476 } 1477 } 1478 } 1479 1480 /* 1481 * DSS needs dispc outputs disabled to reset modules. Returns mask of 1482 * enabled DSS interrupts. Eventually we may be able to do this on 1483 * dispc init rather than top-level DSS init. 1484 */ 1485 static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, 1486 bool disable) 1487 { 1488 bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false; 1489 const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1); 1490 int manager_count; 1491 bool framedonetv_irq; 1492 u32 val, irq_mask = 0; 1493 1494 switch (sysc_soc->soc) { 1495 case SOC_2420 ... SOC_3630: 1496 manager_count = 2; 1497 framedonetv_irq = false; 1498 break; 1499 case SOC_4430 ... 
SOC_4470: 1500 manager_count = 3; 1501 break; 1502 case SOC_5430: 1503 case SOC_DRA7: 1504 manager_count = 4; 1505 break; 1506 case SOC_AM4: 1507 manager_count = 1; 1508 break; 1509 case SOC_UNKNOWN: 1510 default: 1511 return 0; 1512 }; 1513 1514 /* Remap the whole module range to be able to reset dispc outputs */ 1515 devm_iounmap(ddata->dev, ddata->module_va); 1516 ddata->module_va = devm_ioremap(ddata->dev, 1517 ddata->module_pa, 1518 ddata->module_size); 1519 if (!ddata->module_va) 1520 return -EIO; 1521 1522 /* DISP_CONTROL */ 1523 val = sysc_read(ddata, dispc_offset + 0x40); 1524 lcd_en = val & lcd_en_mask; 1525 digit_en = val & digit_en_mask; 1526 if (lcd_en) 1527 irq_mask |= BIT(0); /* FRAMEDONE */ 1528 if (digit_en) { 1529 if (framedonetv_irq) 1530 irq_mask |= BIT(24); /* FRAMEDONETV */ 1531 else 1532 irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */ 1533 } 1534 if (disable & (lcd_en | digit_en)) 1535 sysc_write(ddata, dispc_offset + 0x40, 1536 val & ~(lcd_en_mask | digit_en_mask)); 1537 1538 if (manager_count <= 2) 1539 return irq_mask; 1540 1541 /* DISPC_CONTROL2 */ 1542 val = sysc_read(ddata, dispc_offset + 0x238); 1543 lcd2_en = val & lcd_en_mask; 1544 if (lcd2_en) 1545 irq_mask |= BIT(22); /* FRAMEDONE2 */ 1546 if (disable && lcd2_en) 1547 sysc_write(ddata, dispc_offset + 0x238, 1548 val & ~lcd_en_mask); 1549 1550 if (manager_count <= 3) 1551 return irq_mask; 1552 1553 /* DISPC_CONTROL3 */ 1554 val = sysc_read(ddata, dispc_offset + 0x848); 1555 lcd3_en = val & lcd_en_mask; 1556 if (lcd3_en) 1557 irq_mask |= BIT(30); /* FRAMEDONE3 */ 1558 if (disable && lcd3_en) 1559 sysc_write(ddata, dispc_offset + 0x848, 1560 val & ~lcd_en_mask); 1561 1562 return irq_mask; 1563 } 1564 1565 /* DSS needs child outputs disabled and SDI registers cleared for reset */ 1566 static void sysc_pre_reset_quirk_dss(struct sysc *ddata) 1567 { 1568 const int dispc_offset = 0x1000; 1569 int error; 1570 u32 irq_mask, val; 1571 1572 /* Get enabled outputs */ 1573 irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false); 1574 if (!irq_mask) 1575 return; 1576 1577 /* Clear IRQSTATUS */ 1578 sysc_write(ddata, dispc_offset + 0x18, irq_mask); 1579 1580 /* Disable outputs */ 1581 val = sysc_quirk_dispc(ddata, dispc_offset, true); 1582 1583 /* Poll IRQSTATUS */ 1584 error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18, 1585 val, val != irq_mask, 100, 50); 1586 if (error) 1587 dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n", 1588 __func__, val, irq_mask); 1589 1590 if (sysc_soc->soc == SOC_3430) { 1591 /* Clear DSS_SDI_CONTROL */ 1592 sysc_write(ddata, 0x44, 0); 1593 1594 /* Clear DSS_PLL_CONTROL */ 1595 sysc_write(ddata, 0x48, 0); 1596 } 1597 1598 /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not */ 1599 sysc_write(ddata, 0x40, 0); 1600 } 1601 1602 /* 1-wire needs module's internal clocks enabled for reset */ 1603 static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata) 1604 { 1605 int offset = 0x0c; /* HDQ_CTRL_STATUS */ 1606 u16 val; 1607 1608 val = sysc_read(ddata, offset); 1609 val |= BIT(5); 1610 sysc_write(ddata, offset, val); 1611 } 1612 1613 /* AESS (Audio Engine SubSystem) needs autogating set after enable */ 1614 static void sysc_module_enable_quirk_aess(struct sysc *ddata) 1615 { 1616 int offset = 0x7c; /* AESS_AUTO_GATING_ENABLE */ 1617 1618 sysc_write(ddata, offset, 1); 1619 } 1620 1621 /* I2C needs to be disabled for reset */ 1622 static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable) 1623 { 1624 int offset; 1625 u16 val; 1626 1627 /* I2C_CON, omap2/3 is 
different from omap4 and later */ 1628 if ((ddata->revision & 0xffffff00) == 0x001f0000) 1629 offset = 0x24; 1630 else 1631 offset = 0xa4; 1632 1633 /* I2C_EN */ 1634 val = sysc_read(ddata, offset); 1635 if (enable) 1636 val |= BIT(15); 1637 else 1638 val &= ~BIT(15); 1639 sysc_write(ddata, offset, val); 1640 } 1641 1642 static void sysc_pre_reset_quirk_i2c(struct sysc *ddata) 1643 { 1644 sysc_clk_quirk_i2c(ddata, false); 1645 } 1646 1647 static void sysc_post_reset_quirk_i2c(struct sysc *ddata) 1648 { 1649 sysc_clk_quirk_i2c(ddata, true); 1650 } 1651 1652 /* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */ 1653 static void sysc_quirk_rtc(struct sysc *ddata, bool lock) 1654 { 1655 u32 val, kick0_val = 0, kick1_val = 0; 1656 unsigned long flags; 1657 int error; 1658 1659 if (!lock) { 1660 kick0_val = 0x83e70b13; 1661 kick1_val = 0x95a4f1e0; 1662 } 1663 1664 local_irq_save(flags); 1665 /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */ 1666 error = readl_poll_timeout(ddata->module_va + 0x44, val, 1667 !(val & BIT(0)), 100, 50); 1668 if (error) 1669 dev_warn(ddata->dev, "rtc busy timeout\n"); 1670 /* Now we have ~15 microseconds to read/write various registers */ 1671 sysc_write(ddata, 0x6c, kick0_val); 1672 sysc_write(ddata, 0x70, kick1_val); 1673 local_irq_restore(flags); 1674 } 1675 1676 static void sysc_module_unlock_quirk_rtc(struct sysc *ddata) 1677 { 1678 sysc_quirk_rtc(ddata, false); 1679 } 1680 1681 static void sysc_module_lock_quirk_rtc(struct sysc *ddata) 1682 { 1683 sysc_quirk_rtc(ddata, true); 1684 } 1685 1686 /* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */ 1687 static void sysc_module_enable_quirk_sgx(struct sysc *ddata) 1688 { 1689 int offset = 0xff08; /* OCP_DEBUG_CONFIG */ 1690 u32 val = BIT(31); /* THALIA_INT_BYPASS */ 1691 1692 sysc_write(ddata, offset, val); 1693 } 1694 1695 /* Watchdog timer needs a disable sequence after reset */ 1696 static void sysc_reset_done_quirk_wdt(struct sysc *ddata) 1697 { 1698 int wps, spr, error; 1699 u32 val; 1700 1701 wps = 0x34; 1702 spr = 0x48; 1703 1704 sysc_write(ddata, spr, 0xaaaa); 1705 error = readl_poll_timeout(ddata->module_va + wps, val, 1706 !(val & 0x10), 100, 1707 MAX_MODULE_SOFTRESET_WAIT); 1708 if (error) 1709 dev_warn(ddata->dev, "wdt disable step1 failed\n"); 1710 1711 sysc_write(ddata, spr, 0x5555); 1712 error = readl_poll_timeout(ddata->module_va + wps, val, 1713 !(val & 0x10), 100, 1714 MAX_MODULE_SOFTRESET_WAIT); 1715 if (error) 1716 dev_warn(ddata->dev, "wdt disable step2 failed\n"); 1717 } 1718 1719 /* PRUSS needs to set MSTANDBY_INIT inorder to idle properly */ 1720 static void sysc_module_disable_quirk_pruss(struct sysc *ddata) 1721 { 1722 u32 reg; 1723 1724 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); 1725 reg |= SYSC_PRUSS_STANDBY_INIT; 1726 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); 1727 } 1728 1729 static void sysc_init_module_quirks(struct sysc *ddata) 1730 { 1731 if (ddata->legacy_mode || !ddata->name) 1732 return; 1733 1734 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) { 1735 ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w; 1736 1737 return; 1738 } 1739 1740 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) { 1741 ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c; 1742 ddata->post_reset_quirk = sysc_post_reset_quirk_i2c; 1743 1744 return; 1745 } 1746 1747 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS) 1748 ddata->module_enable_quirk = sysc_module_enable_quirk_aess; 1749 1750 if (ddata->cfg.quirks & 
SYSC_MODULE_QUIRK_DSS_RESET) 1751 ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss; 1752 1753 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) { 1754 ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc; 1755 ddata->module_lock_quirk = sysc_module_lock_quirk_rtc; 1756 1757 return; 1758 } 1759 1760 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX) 1761 ddata->module_enable_quirk = sysc_module_enable_quirk_sgx; 1762 1763 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) { 1764 ddata->reset_done_quirk = sysc_reset_done_quirk_wdt; 1765 ddata->module_disable_quirk = sysc_reset_done_quirk_wdt; 1766 } 1767 1768 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) 1769 ddata->module_disable_quirk = sysc_module_disable_quirk_pruss; 1770 } 1771 1772 static int sysc_clockdomain_init(struct sysc *ddata) 1773 { 1774 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev); 1775 struct clk *fck = NULL, *ick = NULL; 1776 int error; 1777 1778 if (!pdata || !pdata->init_clockdomain) 1779 return 0; 1780 1781 switch (ddata->nr_clocks) { 1782 case 2: 1783 ick = ddata->clocks[SYSC_ICK]; 1784 /* fallthrough */ 1785 case 1: 1786 fck = ddata->clocks[SYSC_FCK]; 1787 break; 1788 case 0: 1789 return 0; 1790 } 1791 1792 error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie); 1793 if (!error || error == -ENODEV) 1794 return 0; 1795 1796 return error; 1797 } 1798 1799 /* 1800 * Note that pdata->init_module() typically does a reset first. After 1801 * pdata->init_module() is done, PM runtime can be used for the interconnect 1802 * target module. 1803 */ 1804 static int sysc_legacy_init(struct sysc *ddata) 1805 { 1806 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev); 1807 int error; 1808 1809 if (!pdata || !pdata->init_module) 1810 return 0; 1811 1812 error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie); 1813 if (error == -EEXIST) 1814 error = 0; 1815 1816 return error; 1817 } 1818 1819 /* 1820 * Note that the caller must ensure the interconnect target module is enabled 1821 * before calling reset. Otherwise reset will not complete. 
1822 */ 1823 static int sysc_reset(struct sysc *ddata) 1824 { 1825 int sysc_offset, syss_offset, sysc_val, rstval, error = 0; 1826 u32 sysc_mask, syss_done; 1827 1828 sysc_offset = ddata->offsets[SYSC_SYSCONFIG]; 1829 syss_offset = ddata->offsets[SYSC_SYSSTATUS]; 1830 1831 if (ddata->legacy_mode || 1832 ddata->cap->regbits->srst_shift < 0 || 1833 ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) 1834 return 0; 1835 1836 sysc_mask = BIT(ddata->cap->regbits->srst_shift); 1837 1838 if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED) 1839 syss_done = 0; 1840 else 1841 syss_done = ddata->cfg.syss_mask; 1842 1843 if (ddata->pre_reset_quirk) 1844 ddata->pre_reset_quirk(ddata); 1845 1846 if (sysc_offset >= 0) { 1847 sysc_val = sysc_read_sysconfig(ddata); 1848 sysc_val |= sysc_mask; 1849 sysc_write(ddata, sysc_offset, sysc_val); 1850 } 1851 1852 if (ddata->cfg.srst_udelay) 1853 usleep_range(ddata->cfg.srst_udelay, 1854 ddata->cfg.srst_udelay * 2); 1855 1856 if (ddata->post_reset_quirk) 1857 ddata->post_reset_quirk(ddata); 1858 1859 /* Poll on reset status */ 1860 if (syss_offset >= 0) { 1861 error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval, 1862 (rstval & ddata->cfg.syss_mask) == 1863 syss_done, 1864 100, MAX_MODULE_SOFTRESET_WAIT); 1865 1866 } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { 1867 error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval, 1868 !(rstval & sysc_mask), 1869 100, MAX_MODULE_SOFTRESET_WAIT); 1870 } 1871 1872 if (ddata->reset_done_quirk) 1873 ddata->reset_done_quirk(ddata); 1874 1875 return error; 1876 } 1877 1878 /* 1879 * At this point the module is configured enough to read the revision but 1880 * module may not be completely configured yet to use PM runtime. Enable 1881 * all clocks directly during init to configure the quirks needed for PM 1882 * runtime based on the revision register. 1883 */ 1884 static int sysc_init_module(struct sysc *ddata) 1885 { 1886 int error = 0; 1887 1888 error = sysc_clockdomain_init(ddata); 1889 if (error) 1890 return error; 1891 1892 sysc_clkdm_deny_idle(ddata); 1893 1894 /* 1895 * Always enable clocks. The bootloader may or may not have enabled 1896 * the related clocks. 
1897 */ 1898 error = sysc_enable_opt_clocks(ddata); 1899 if (error) 1900 return error; 1901 1902 error = sysc_enable_main_clocks(ddata); 1903 if (error) 1904 goto err_opt_clocks; 1905 1906 if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) { 1907 error = reset_control_deassert(ddata->rsts); 1908 if (error) 1909 goto err_main_clocks; 1910 } 1911 1912 ddata->revision = sysc_read_revision(ddata); 1913 sysc_init_revision_quirks(ddata); 1914 sysc_init_module_quirks(ddata); 1915 1916 if (ddata->legacy_mode) { 1917 error = sysc_legacy_init(ddata); 1918 if (error) 1919 goto err_reset; 1920 } 1921 1922 if (!ddata->legacy_mode) { 1923 error = sysc_enable_module(ddata->dev); 1924 if (error) 1925 goto err_reset; 1926 } 1927 1928 error = sysc_reset(ddata); 1929 if (error) 1930 dev_err(ddata->dev, "Reset failed with %d\n", error); 1931 1932 if (error && !ddata->legacy_mode) 1933 sysc_disable_module(ddata->dev); 1934 1935 err_reset: 1936 if (error && !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) 1937 reset_control_assert(ddata->rsts); 1938 1939 err_main_clocks: 1940 if (error) 1941 sysc_disable_main_clocks(ddata); 1942 err_opt_clocks: 1943 /* No re-enable of clockdomain autoidle to prevent module autoidle */ 1944 if (error) { 1945 sysc_disable_opt_clocks(ddata); 1946 sysc_clkdm_allow_idle(ddata); 1947 } 1948 1949 return error; 1950 } 1951 1952 static int sysc_init_sysc_mask(struct sysc *ddata) 1953 { 1954 struct device_node *np = ddata->dev->of_node; 1955 int error; 1956 u32 val; 1957 1958 error = of_property_read_u32(np, "ti,sysc-mask", &val); 1959 if (error) 1960 return 0; 1961 1962 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask; 1963 1964 return 0; 1965 } 1966 1967 static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes, 1968 const char *name) 1969 { 1970 struct device_node *np = ddata->dev->of_node; 1971 struct property *prop; 1972 const __be32 *p; 1973 u32 val; 1974 1975 of_property_for_each_u32(np, name, prop, p, val) { 1976 if (val >= SYSC_NR_IDLEMODES) { 1977 dev_err(ddata->dev, "invalid idlemode: %i\n", val); 1978 return -EINVAL; 1979 } 1980 *idlemodes |= (1 << val); 1981 } 1982 1983 return 0; 1984 } 1985 1986 static int sysc_init_idlemodes(struct sysc *ddata) 1987 { 1988 int error; 1989 1990 error = sysc_init_idlemode(ddata, &ddata->cfg.midlemodes, 1991 "ti,sysc-midle"); 1992 if (error) 1993 return error; 1994 1995 error = sysc_init_idlemode(ddata, &ddata->cfg.sidlemodes, 1996 "ti,sysc-sidle"); 1997 if (error) 1998 return error; 1999 2000 return 0; 2001 } 2002 2003 /* 2004 * Only some devices on omap4 and later have SYSCONFIG reset done 2005 * bit. We can detect this if there is no SYSSTATUS at all, or the 2006 * SYSTATUS bit 0 is not used. Note that some SYSSTATUS registers 2007 * have multiple bits for the child devices like OHCI and EHCI. 2008 * Depends on SYSC being parsed first. 
/*
 * Only some devices on omap4 and later have SYSCONFIG reset done
 * bit. We can detect this if there is no SYSSTATUS at all, or the
 * SYSSTATUS bit 0 is not used. Note that some SYSSTATUS registers
 * have multiple bits for the child devices like OHCI and EHCI.
 * Depends on SYSC being parsed first.
 */
static int sysc_init_syss_mask(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	int error;
	u32 val;

	error = of_property_read_u32(np, "ti,syss-mask", &val);
	if (error) {
		if ((ddata->cap->type == TI_SYSC_OMAP4 ||
		     ddata->cap->type == TI_SYSC_OMAP4_TIMER) &&
		    (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
			ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;

		return 0;
	}

	if (!(val & 1) && (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
		ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;

	ddata->cfg.syss_mask = val;

	return 0;
}

/*
 * Many child device drivers need to have fck and opt clocks available
 * to get the clock rate for device internal configuration etc.
 */
static int sysc_child_add_named_clock(struct sysc *ddata,
				      struct device *child,
				      const char *name)
{
	struct clk *clk;
	struct clk_lookup *l;
	int error = 0;

	if (!name)
		return 0;

	clk = clk_get(child, name);
	if (!IS_ERR(clk)) {
		error = -EEXIST;
		goto put_clk;
	}

	clk = clk_get(ddata->dev, name);
	if (IS_ERR(clk))
		return -ENODEV;

	l = clkdev_create(clk, name, dev_name(child));
	if (!l)
		error = -ENOMEM;
put_clk:
	clk_put(clk);

	return error;
}

static int sysc_child_add_clocks(struct sysc *ddata,
				 struct device *child)
{
	int i, error;

	for (i = 0; i < ddata->nr_clocks; i++) {
		error = sysc_child_add_named_clock(ddata,
						   child,
						   ddata->clock_roles[i]);
		if (error && error != -EEXIST) {
			dev_err(ddata->dev, "could not add child clock %s: %i\n",
				ddata->clock_roles[i], error);

			return error;
		}
	}

	return 0;
}
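/*
 * Editorial example, device name is made up: for a child probed as
 * "48060000.mmc" and a clock role "fck", clkdev_create() above adds a
 * lookup roughly equivalent to CLKDEV_INIT("48060000.mmc", "fck", fck),
 * so clk_get(child, "fck") in the child driver resolves to the
 * interconnect target module's functional clock.
 */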
static struct device_type sysc_device_type = {
};

static struct sysc *sysc_child_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent || parent->type != &sysc_device_type)
		return NULL;

	return dev_get_drvdata(parent);
}

static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	error = pm_generic_runtime_suspend(dev);
	if (error)
		return error;

	if (!ddata->enabled)
		return 0;

	return sysc_runtime_suspend(ddata->dev);
}

static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	if (!ddata->enabled) {
		error = sysc_runtime_resume(ddata->dev);
		if (error < 0)
			dev_err(ddata->dev,
				"%s error: %i\n", __func__, error);
	}

	return pm_generic_runtime_resume(dev);
}

#ifdef CONFIG_PM_SLEEP
static int sysc_child_suspend_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	error = pm_generic_suspend_noirq(dev);
	if (error) {
		dev_err(dev, "%s error at %i: %i\n",
			__func__, __LINE__, error);

		return error;
	}

	if (!pm_runtime_status_suspended(dev)) {
		error = pm_generic_runtime_suspend(dev);
		if (error) {
			dev_dbg(dev, "%s busy at %i: %i\n",
				__func__, __LINE__, error);

			return 0;
		}

		error = sysc_runtime_suspend(ddata->dev);
		if (error) {
			dev_err(dev, "%s error at %i: %i\n",
				__func__, __LINE__, error);

			return error;
		}

		ddata->child_needs_resume = true;
	}

	return 0;
}

static int sysc_child_resume_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	if (ddata->child_needs_resume) {
		ddata->child_needs_resume = false;

		error = sysc_runtime_resume(ddata->dev);
		if (error)
			dev_err(ddata->dev,
				"%s runtime resume error: %i\n",
				__func__, error);

		error = pm_generic_runtime_resume(dev);
		if (error)
			dev_err(ddata->dev,
				"%s generic runtime resume: %i\n",
				__func__, error);
	}

	return pm_generic_resume_noirq(dev);
}
#endif

static struct dev_pm_domain sysc_child_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
				   sysc_child_runtime_resume,
				   NULL)
		USE_PLATFORM_PM_SLEEP_OPS
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
					      sysc_child_resume_noirq)
	}
};
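/*
 * Editorial note: the noirq callbacks above handle a child that is still
 * runtime PM active when system suspend starts, such as a busy console
 * UART. The child and the module are suspended by hand and
 * child_needs_resume tells sysc_child_resume_noirq() to undo exactly
 * that work.
 */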
/**
 * sysc_legacy_idle_quirk - handle children in omap_device compatible way
 * @ddata: device driver data
 * @child: child device driver
 *
 * Allow idle for child devices as done with _od_runtime_suspend().
 * Otherwise many child devices will not idle because of the permanent
 * parent usecount set in pm_runtime_irq_safe().
 *
 * Note that the long term solution is to just modify the child device
 * drivers to not set pm_runtime_irq_safe() and then this can be just
 * dropped.
 */
static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
{
	if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
		dev_pm_domain_set(child, &sysc_child_pm_domain);
}

static int sysc_notifier_call(struct notifier_block *nb,
			      unsigned long event, void *device)
{
	struct device *dev = device;
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);
	if (!ddata)
		return NOTIFY_DONE;

	switch (event) {
	case BUS_NOTIFY_ADD_DEVICE:
		error = sysc_child_add_clocks(ddata, dev);
		if (error)
			return error;
		sysc_legacy_idle_quirk(ddata, dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block sysc_nb = {
	.notifier_call = sysc_notifier_call,
};

/* Device tree configured quirks */
struct sysc_dts_quirk {
	const char *name;
	u32 mask;
};

static const struct sysc_dts_quirk sysc_dts_quirks[] = {
	{ .name = "ti,no-idle-on-init",
	  .mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
	{ .name = "ti,no-reset-on-init",
	  .mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
	{ .name = "ti,no-idle",
	  .mask = SYSC_QUIRK_NO_IDLE, },
};

static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
				  bool is_child)
{
	const struct property *prop;
	int i, len;

	for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
		const char *name = sysc_dts_quirks[i].name;

		prop = of_get_property(np, name, &len);
		if (!prop)
			continue;

		ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
		if (is_child) {
			dev_warn(ddata->dev,
				 "dts flag should be at module level for %s\n",
				 name);
		}
	}
}

static int sysc_init_dts_quirks(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	int error;
	u32 val;

	ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);

	sysc_parse_dts_quirks(ddata, np, false);
	error = of_property_read_u32(np, "ti,sysc-delay-us", &val);
	if (!error) {
		if (val > 255) {
			dev_warn(ddata->dev, "bad ti,sysc-delay-us: %i\n",
				 val);
		}

		ddata->cfg.srst_udelay = (u8)val;
	}

	return 0;
}

static void sysc_unprepare(struct sysc *ddata)
{
	int i;

	if (!ddata->clocks)
		return;

	for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
		if (!IS_ERR_OR_NULL(ddata->clocks[i]))
			clk_unprepare(ddata->clocks[i]);
	}
}

/*
 * Common sysc register bits found on omap2, also known as type1
 */
static const struct sysc_regbits sysc_regbits_omap2 = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = 12,
	.sidle_shift = 3,
	.clkact_shift = 8,
	.emufree_shift = 5,
	.enwkup_shift = 2,
	.srst_shift = 1,
	.autoidle_shift = 0,
};
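/*
 * Editorial example of the type1 layout above, value is illustrative:
 * smart-idle (2 << 3), wakeup enable (1 << 2) and autoidle (1 << 0)
 * encode to a SYSCONFIG value of 0x15, and smart-standby adds (2 << 12).
 */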
static const struct sysc_capabilities sysc_omap2 = {
	.type = TI_SYSC_OMAP2,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
};

/* All omap2 and 3 timers, and timers 1, 2 & 10 on omap 4 and 5 */
static const struct sysc_capabilities sysc_omap2_timer = {
	.type = TI_SYSC_OMAP2_TIMER,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT,
};

/*
 * SHAM2 (SHA1/MD5) sysc found on omap3, a variant of sysc_regbits_omap2
 * with different sidle position
 */
static const struct sysc_regbits sysc_regbits_omap3_sham = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 4,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap3_sham = {
	.type = TI_SYSC_OMAP3_SHAM,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_sham,
};

/*
 * AES register bits found on omap3 and later, a variant of
 * sysc_regbits_omap2 with different sidle position
 */
static const struct sysc_regbits sysc_regbits_omap3_aes = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 6,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap3_aes = {
	.type = TI_SYSC_OMAP3_AES,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_aes,
};

/*
 * Common sysc register bits found on omap4, also known as type2
 */
static const struct sysc_regbits sysc_regbits_omap4 = {
	.dmadisable_shift = 16,
	.midle_shift = 4,
	.sidle_shift = 2,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.emufree_shift = 1,
	.srst_shift = 0,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4 = {
	.type = TI_SYSC_OMAP4,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};

static const struct sysc_capabilities sysc_omap4_timer = {
	.type = TI_SYSC_OMAP4_TIMER,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};
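/*
 * Editorial example of the type2 layout above, value is illustrative:
 * compared to type1 there is no autoidle or clockactivity field,
 * softreset moves to bit 0, and smart-idle with smart-standby encodes
 * as (2 << 2) | (2 << 4) = 0x28.
 */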
/*
 * Common sysc register bits found on omap4, also known as type3
 */
static const struct sysc_regbits sysc_regbits_omap4_simple = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = 2,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_simple = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
};

/*
 * SmartReflex sysc found on omap34xx
 */
static const struct sysc_regbits sysc_regbits_omap34xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = 20,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_34xx_sr = {
	.type = TI_SYSC_OMAP34XX_SR,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
	.regbits = &sysc_regbits_omap34xx_sr,
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
		      SYSC_QUIRK_LEGACY_IDLE,
};

/*
 * SmartReflex sysc found on omap36xx and later
 */
static const struct sysc_regbits sysc_regbits_omap36xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_36xx_sr = {
	.type = TI_SYSC_OMAP36XX_SR,
	.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
};

static const struct sysc_capabilities sysc_omap4_sr = {
	.type = TI_SYSC_OMAP4_SR,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};

/*
 * McASP register bits found on omap4 and later
 */
static const struct sysc_regbits sysc_regbits_omap4_mcasp = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_mcasp = {
	.type = TI_SYSC_OMAP4_MCASP,
	.regbits = &sysc_regbits_omap4_mcasp,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};

/*
 * McASP found on dra7 and later
 */
static const struct sysc_capabilities sysc_dra7_mcasp = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};

/*
 * FS USB host found on omap4 and later
 */
static const struct sysc_regbits sysc_regbits_omap4_usb_host_fs = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
	.type = TI_SYSC_OMAP4_USB_HOST_FS,
	.sysc_mask = SYSC_OMAP2_ENAWAKEUP,
	.regbits = &sysc_regbits_omap4_usb_host_fs,
};

static const struct sysc_regbits sysc_regbits_dra7_mcan = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 4,
	.srst_shift = 0,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_dra7_mcan = {
	.type = TI_SYSC_DRA7_MCAN,
	.sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_dra7_mcan,
	.mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};

/*
 * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
 */
static const struct sysc_capabilities sysc_pruss = {
	.type = TI_SYSC_PRUSS,
	.sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
};
static int sysc_init_pdata(struct sysc *ddata)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
	struct ti_sysc_module_data *mdata;

	if (!pdata)
		return 0;

	mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;

	if (ddata->legacy_mode) {
		mdata->name = ddata->legacy_mode;
		mdata->module_pa = ddata->module_pa;
		mdata->module_size = ddata->module_size;
		mdata->offsets = ddata->offsets;
		mdata->nr_offsets = SYSC_MAX_REGS;
		mdata->cap = ddata->cap;
		mdata->cfg = &ddata->cfg;
	}

	ddata->mdata = mdata;

	return 0;
}

static int sysc_init_match(struct sysc *ddata)
{
	const struct sysc_capabilities *cap;

	cap = of_device_get_match_data(ddata->dev);
	if (!cap)
		return -EINVAL;

	ddata->cap = cap;
	if (ddata->cap)
		ddata->cfg.quirks |= ddata->cap->mod_quirks;

	return 0;
}

static void ti_sysc_idle(struct work_struct *work)
{
	struct sysc *ddata;

	ddata = container_of(work, struct sysc, idle_work.work);

	/*
	 * One time decrement of clock usage counts if left on from init.
	 * Note that we disable opt clocks unconditionally in this case
	 * as they are enabled unconditionally during init without
	 * considering sysc_opt_clks_needed() at that point.
	 */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT)) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	/* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
	if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
		return;

	/*
	 * Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
	 * and SYSC_QUIRK_NO_RESET_ON_INIT
	 */
	if (pm_runtime_active(ddata->dev))
		pm_runtime_put_sync(ddata->dev);
}

/*
 * SoC model and features detection. Only needed for SoCs that need
 * special handling for quirks, no need to list others.
 */
static const struct soc_device_attribute sysc_soc_match[] = {
	SOC_FLAG("OMAP242*", SOC_2420),
	SOC_FLAG("OMAP243*", SOC_2430),
	SOC_FLAG("OMAP3[45]*", SOC_3430),
	SOC_FLAG("OMAP3[67]*", SOC_3630),
	SOC_FLAG("OMAP443*", SOC_4430),
	SOC_FLAG("OMAP446*", SOC_4460),
	SOC_FLAG("OMAP447*", SOC_4470),
	SOC_FLAG("OMAP54*", SOC_5430),
	SOC_FLAG("AM433", SOC_AM3),
	SOC_FLAG("AM43*", SOC_AM4),
	SOC_FLAG("DRA7*", SOC_DRA7),

	{ /* sentinel */ },
};
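/*
 * Editorial note: soc_device_match() compares the machine strings above
 * using glob_match(), so "OMAP3[45]*" covers both OMAP34xx and OMAP35xx
 * and "DRA7*" covers all DRA7xx variants.
 */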
/*
 * List of SoC variants with disabled features. By default we assume all
 * devices in the device tree are available so no need to list those SoCs.
 */
static const struct soc_device_attribute sysc_soc_feat_match[] = {
	/* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
	SOC_FLAG("AM3505", DIS_SGX),
	SOC_FLAG("OMAP3525", DIS_SGX),
	SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),

	/* OMAP3630/DM3730 variants with some accelerators disabled */
	SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
	SOC_FLAG("DM3725", DIS_SGX),
	SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
	SOC_FLAG("OMAP3621", DIS_ISP),

	{ /* sentinel */ },
};

static int sysc_add_disabled(unsigned long base)
{
	struct sysc_address *disabled_module;

	disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
	if (!disabled_module)
		return -ENOMEM;

	disabled_module->base = base;

	mutex_lock(&sysc_soc->list_lock);
	list_add(&disabled_module->node, &sysc_soc->disabled_modules);
	mutex_unlock(&sysc_soc->list_lock);

	return 0;
}
/*
 * One time init to detect the booted SoC and disable unavailable features.
 * Note that we initialize static data shared across all ti-sysc instances
 * so ddata is only used for SoC type. This can be called from module_init
 * once we no longer need to rely on platform data.
 */
static int sysc_init_soc(struct sysc *ddata)
{
	const struct soc_device_attribute *match;
	struct ti_sysc_platform_data *pdata;
	unsigned long features = 0;

	if (sysc_soc)
		return 0;

	sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
	if (!sysc_soc)
		return -ENOMEM;

	mutex_init(&sysc_soc->list_lock);
	INIT_LIST_HEAD(&sysc_soc->disabled_modules);
	sysc_soc->general_purpose = true;

	pdata = dev_get_platdata(ddata->dev);
	if (pdata && pdata->soc_type_gp)
		sysc_soc->general_purpose = pdata->soc_type_gp();

	match = soc_device_match(sysc_soc_match);
	if (match && match->data)
		sysc_soc->soc = (int)match->data;

	/* Ignore devices that are not available on HS and EMU SoCs */
	if (!sysc_soc->general_purpose) {
		switch (sysc_soc->soc) {
		case SOC_3430 ... SOC_3630:
			sysc_add_disabled(0x48304000);	/* timer12 */
			break;
		default:
			break;
		}
	}

	match = soc_device_match(sysc_soc_feat_match);
	if (!match)
		return 0;

	if (match->data)
		features = (unsigned long)match->data;

	/*
	 * Add disabled devices to the list based on the module base.
	 * Note that this must be done before we attempt to access the
	 * device and have module revision checks working.
	 */
	if (features & DIS_ISP)
		sysc_add_disabled(0x480bd400);
	if (features & DIS_IVA)
		sysc_add_disabled(0x5d000000);
	if (features & DIS_SGX)
		sysc_add_disabled(0x50000000);

	return 0;
}

static void sysc_cleanup_soc(void)
{
	struct sysc_address *disabled_module;
	struct list_head *pos, *tmp;

	if (!sysc_soc)
		return;

	mutex_lock(&sysc_soc->list_lock);
	list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
		disabled_module = list_entry(pos, struct sysc_address, node);
		list_del(pos);
		kfree(disabled_module);
	}
	mutex_unlock(&sysc_soc->list_lock);
}

static int sysc_check_disabled_devices(struct sysc *ddata)
{
	struct sysc_address *disabled_module;
	struct list_head *pos;
	int error = 0;

	mutex_lock(&sysc_soc->list_lock);
	list_for_each(pos, &sysc_soc->disabled_modules) {
		disabled_module = list_entry(pos, struct sysc_address, node);
		if (ddata->module_pa == disabled_module->base) {
			dev_dbg(ddata->dev, "module disabled for this SoC\n");
			error = -ENODEV;
			break;
		}
	}
	mutex_unlock(&sysc_soc->list_lock);

	return error;
}

static const struct of_device_id sysc_match_table[] = {
	{ .compatible = "simple-bus", },
	{ /* sentinel */ },
};
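/*
 * Editorial summary of the probe flow below: detect the SoC, look up the
 * capabilities for the compatible, parse the dts quirks and sysconfig
 * properties, map and check the registers, then enable clocks and reset
 * the module in sysc_init_module() before populating the child devices
 * with of_platform_populate().
 */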
static int sysc_probe(struct platform_device *pdev)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct sysc *ddata;
	int error;

	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	ddata->dev = &pdev->dev;
	platform_set_drvdata(pdev, ddata);

	error = sysc_init_soc(ddata);
	if (error)
		return error;

	error = sysc_init_match(ddata);
	if (error)
		return error;

	error = sysc_init_dts_quirks(ddata);
	if (error)
		return error;

	error = sysc_map_and_check_registers(ddata);
	if (error)
		return error;

	error = sysc_init_sysc_mask(ddata);
	if (error)
		return error;

	error = sysc_init_idlemodes(ddata);
	if (error)
		return error;

	error = sysc_init_syss_mask(ddata);
	if (error)
		return error;

	error = sysc_init_pdata(ddata);
	if (error)
		return error;

	sysc_init_early_quirks(ddata);

	error = sysc_check_disabled_devices(ddata);
	if (error)
		return error;

	error = sysc_get_clocks(ddata);
	if (error)
		return error;

	error = sysc_init_resets(ddata);
	if (error)
		goto unprepare;

	error = sysc_init_module(ddata);
	if (error)
		goto unprepare;

	pm_runtime_enable(ddata->dev);
	error = pm_runtime_get_sync(ddata->dev);
	if (error < 0) {
		pm_runtime_put_noidle(ddata->dev);
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	/* Balance use counts as PM runtime should have enabled these all */
	if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
		reset_control_assert(ddata->rsts);

	if (!(ddata->cfg.quirks &
	      (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	sysc_show_registers(ddata);

	ddata->dev->type = &sysc_device_type;
	error = of_platform_populate(ddata->dev->of_node, sysc_match_table,
				     pdata ? pdata->auxdata : NULL,
				     ddata->dev);
	if (error)
		goto err;

	INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);

	/* At least earlycon won't survive without deferred idle */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT |
				 SYSC_QUIRK_NO_RESET_ON_INIT)) {
		schedule_delayed_work(&ddata->idle_work, 3000);
	} else {
		pm_runtime_put(&pdev->dev);
	}

	return 0;

err:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
unprepare:
	sysc_unprepare(ddata);

	return error;
}

static int sysc_remove(struct platform_device *pdev)
{
	struct sysc *ddata = platform_get_drvdata(pdev);
	int error;

	cancel_delayed_work_sync(&ddata->idle_work);

	error = pm_runtime_get_sync(ddata->dev);
	if (error < 0) {
		pm_runtime_put_noidle(ddata->dev);
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	of_platform_depopulate(&pdev->dev);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(ddata->rsts);

unprepare:
	sysc_unprepare(ddata);

	return 0;
}

static const struct of_device_id sysc_match[] = {
	{ .compatible = "ti,sysc-omap2", .data = &sysc_omap2, },
	{ .compatible = "ti,sysc-omap2-timer", .data = &sysc_omap2_timer, },
	{ .compatible = "ti,sysc-omap4", .data = &sysc_omap4, },
	{ .compatible = "ti,sysc-omap4-timer", .data = &sysc_omap4_timer, },
	{ .compatible = "ti,sysc-omap4-simple", .data = &sysc_omap4_simple, },
	{ .compatible = "ti,sysc-omap3430-sr", .data = &sysc_34xx_sr, },
	{ .compatible = "ti,sysc-omap3630-sr", .data = &sysc_36xx_sr, },
	{ .compatible = "ti,sysc-omap4-sr", .data = &sysc_omap4_sr, },
	{ .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, },
	{ .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, },
	{ .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
	{ .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, },
	{ .compatible = "ti,sysc-usb-host-fs",
	  .data = &sysc_omap4_usb_host_fs, },
	{ .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
	{ .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
	{ },
};
MODULE_DEVICE_TABLE(of, sysc_match);

static struct platform_driver sysc_driver = {
	.probe = sysc_probe,
	.remove = sysc_remove,
	.driver = {
		.name = "ti-sysc",
		.of_match_table = sysc_match,
		.pm = &sysc_pm_ops,
	},
};

static int __init sysc_init(void)
{
	bus_register_notifier(&platform_bus_type, &sysc_nb);

	return platform_driver_register(&sysc_driver);
}
module_init(sysc_init);

static void __exit sysc_exit(void)
{
	bus_unregister_notifier(&platform_bus_type, &sysc_nb);
	platform_driver_unregister(&sysc_driver);
	sysc_cleanup_soc();
}
module_exit(sysc_exit);

MODULE_DESCRIPTION("TI sysc interconnect target driver");
MODULE_LICENSE("GPL v2");
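/*
 * Editorial example of a node this driver binds to. Addresses, clocks and
 * names below are made up; see the in-tree omap dts files and the ti-sysc
 * devicetree binding for real ones:
 *
 *	target-module@48060000 {
 *		compatible = "ti,sysc-omap2", "ti,sysc";
 *		reg = <0x48060000 0x4>,
 *		      <0x48060010 0x4>,
 *		      <0x48060014 0x4>;
 *		reg-names = "rev", "sysc", "syss";
 *		ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)>;
 *		ti,sysc-sidle = <SYSC_IDLE_FORCE>, <SYSC_IDLE_NO>, <SYSC_IDLE_SMART>;
 *		ti,syss-mask = <1>;
 *		clocks = <&module_fck>;
 *		clock-names = "fck";
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges = <0x0 0x48060000 0x1000>;
 *
 *		child@0 {
 *			...
 *		};
 *	};
 */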