xref: /openbmc/linux/drivers/bus/ti-sysc.c (revision 5ad1ab30)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ti-sysc.c - Texas Instruments sysc interconnect target driver
4  */
5 
6 #include <linux/io.h>
7 #include <linux/clk.h>
8 #include <linux/clkdev.h>
9 #include <linux/cpu_pm.h>
10 #include <linux/delay.h>
11 #include <linux/list.h>
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_domain.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/reset.h>
17 #include <linux/of_address.h>
18 #include <linux/of_platform.h>
19 #include <linux/slab.h>
20 #include <linux/sys_soc.h>
21 #include <linux/timekeeping.h>
22 #include <linux/iopoll.h>
23 
24 #include <linux/platform_data/ti-sysc.h>
25 
26 #include <dt-bindings/bus/ti-sysc.h>
27 
/* Module disable bits used as SoC match data, users not in this chunk */
#define DIS_ISP		BIT(2)
#define DIS_IVA		BIT(1)
#define DIS_SGX		BIT(0)

/* Initializer for soc_device_attribute style match entries */
#define SOC_FLAG(match, flag)	{ .machine = match, .data = (void *)(flag), }

/* Maximum time in usecs to poll for OCP softreset completion */
#define MAX_MODULE_SOFTRESET_WAIT		10000
35 
/* SoC variants known to this driver, used for SoC specific handling */
enum sysc_soc {
	SOC_UNKNOWN,
	SOC_2420,
	SOC_2430,
	SOC_3430,
	SOC_3630,
	SOC_4430,
	SOC_4460,
	SOC_4470,
	SOC_5430,
	SOC_AM3,
	SOC_AM4,
	SOC_DRA7,
};
50 
/* One module base address entry for the sysc_soc_info module lists */
struct sysc_address {
	unsigned long base;
	struct list_head node;
};
55 
/* List entry linking a sysc instance into a module list */
struct sysc_module {
	struct sysc *ddata;
	struct list_head node;
};
60 
/*
 * Per-SoC bookkeeping: detected SoC type, device type bit (presumably
 * GP vs HS device — confirm against the soc match code), plus the lists
 * of disabled and restored modules and their CPU PM notifier.
 */
struct sysc_soc_info {
	unsigned long general_purpose:1;
	enum sysc_soc soc;
	struct mutex list_lock;	/* disabled and restored modules list lock */
	struct list_head disabled_modules;
	struct list_head restored_modules;
	struct notifier_block nb;
};
69 
/*
 * Clock array slots: one main functional clock, one interface clock,
 * then up to eight optional clocks. See clock_names[] for the matching
 * devicetree clock-names values.
 */
enum sysc_clocks {
	SYSC_FCK,
	SYSC_ICK,
	SYSC_OPTFCK0,
	SYSC_OPTFCK1,
	SYSC_OPTFCK2,
	SYSC_OPTFCK3,
	SYSC_OPTFCK4,
	SYSC_OPTFCK5,
	SYSC_OPTFCK6,
	SYSC_OPTFCK7,
	SYSC_MAX_CLOCKS,
};
83 
/* Matched SoC information, set up at init (users not in this chunk) */
static struct sysc_soc_info *sysc_soc;
/* Memory resource names, indexed by enum sysc_registers */
static const char * const reg_names[] = { "rev", "sysc", "syss", };
/* Clock role names, indexed by enum sysc_clocks */
static const char * const clock_names[SYSC_MAX_CLOCKS] = {
	"fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
	"opt5", "opt6", "opt7",
};

/* Two-bit register field masks, users not visible in this chunk */
#define SYSC_IDLEMODE_MASK		3
#define SYSC_CLOCKACTIVITY_MASK		3
93 
/**
 * struct sysc - TI sysc interconnect target module registers and capabilities
 * @dev: struct device pointer
 * @module_pa: physical address of the interconnect target module
 * @module_size: size of the interconnect target module
 * @module_va: virtual address of the interconnect target module
 * @offsets: register offsets from module base
 * @mdata: ti-sysc to hwmod translation data for a module
 * @clocks: clocks used by the interconnect target module
 * @clock_roles: clock role names for the found clocks
 * @nr_clocks: number of clocks used by the interconnect target module
 * @rsts: resets used by the interconnect target module
 * @legacy_mode: configured for legacy mode if set
 * @cap: interconnect target module capabilities
 * @cfg: interconnect target module configuration
 * @cookie: data used by legacy platform callbacks
 * @name: name if available
 * @revision: interconnect target module revision
 * @sysconfig: saved sysconfig register value
 * @reserved: target module is reserved and already in use
 * @enabled: sysc runtime enabled status
 * @needs_resume: runtime resume needed on resume from suspend
 * @child_needs_resume: runtime resume needed for child on resume from suspend
 * @idle_work: work structure used to perform delayed idle on a module
 * @pre_reset_quirk: module specific pre-reset quirk
 * @post_reset_quirk: module specific post-reset quirk
 * @reset_done_quirk: module specific reset done quirk
 * @module_enable_quirk: module specific enable quirk
 * @module_disable_quirk: module specific disable quirk
 * @module_unlock_quirk: module specific sysconfig unlock quirk
 * @module_lock_quirk: module specific sysconfig lock quirk
 */
struct sysc {
	struct device *dev;
	u64 module_pa;
	u32 module_size;
	void __iomem *module_va;
	int offsets[SYSC_MAX_REGS];
	struct ti_sysc_module_data *mdata;
	struct clk **clocks;
	const char **clock_roles;
	int nr_clocks;
	struct reset_control *rsts;
	const char *legacy_mode;
	const struct sysc_capabilities *cap;
	struct sysc_config cfg;
	struct ti_sysc_cookie cookie;
	const char *name;
	u32 revision;
	u32 sysconfig;
	unsigned int reserved:1;
	unsigned int enabled:1;
	unsigned int needs_resume:1;
	unsigned int child_needs_resume:1;
	struct delayed_work idle_work;
	void (*pre_reset_quirk)(struct sysc *sysc);
	void (*post_reset_quirk)(struct sysc *sysc);
	void (*reset_done_quirk)(struct sysc *sysc);
	void (*module_enable_quirk)(struct sysc *sysc);
	void (*module_disable_quirk)(struct sysc *sysc);
	void (*module_unlock_quirk)(struct sysc *sysc);
	void (*module_lock_quirk)(struct sysc *sysc);
};
157 
158 static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
159 				  bool is_child);
160 static int sysc_reset(struct sysc *ddata);
161 
162 static void sysc_write(struct sysc *ddata, int offset, u32 value)
163 {
164 	if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
165 		writew_relaxed(value & 0xffff, ddata->module_va + offset);
166 
167 		/* Only i2c revision has LO and HI register with stride of 4 */
168 		if (ddata->offsets[SYSC_REVISION] >= 0 &&
169 		    offset == ddata->offsets[SYSC_REVISION]) {
170 			u16 hi = value >> 16;
171 
172 			writew_relaxed(hi, ddata->module_va + offset + 4);
173 		}
174 
175 		return;
176 	}
177 
178 	writel_relaxed(value, ddata->module_va + offset);
179 }
180 
181 static u32 sysc_read(struct sysc *ddata, int offset)
182 {
183 	if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
184 		u32 val;
185 
186 		val = readw_relaxed(ddata->module_va + offset);
187 
188 		/* Only i2c revision has LO and HI register with stride of 4 */
189 		if (ddata->offsets[SYSC_REVISION] >= 0 &&
190 		    offset == ddata->offsets[SYSC_REVISION]) {
191 			u16 tmp = readw_relaxed(ddata->module_va + offset + 4);
192 
193 			val |= tmp << 16;
194 		}
195 
196 		return val;
197 	}
198 
199 	return readl_relaxed(ddata->module_va + offset);
200 }
201 
202 static bool sysc_opt_clks_needed(struct sysc *ddata)
203 {
204 	return !!(ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_NEEDED);
205 }
206 
207 static u32 sysc_read_revision(struct sysc *ddata)
208 {
209 	int offset = ddata->offsets[SYSC_REVISION];
210 
211 	if (offset < 0)
212 		return 0;
213 
214 	return sysc_read(ddata, offset);
215 }
216 
217 static u32 sysc_read_sysconfig(struct sysc *ddata)
218 {
219 	int offset = ddata->offsets[SYSC_SYSCONFIG];
220 
221 	if (offset < 0)
222 		return 0;
223 
224 	return sysc_read(ddata, offset);
225 }
226 
227 static u32 sysc_read_sysstatus(struct sysc *ddata)
228 {
229 	int offset = ddata->offsets[SYSC_SYSSTATUS];
230 
231 	if (offset < 0)
232 		return 0;
233 
234 	return sysc_read(ddata, offset);
235 }
236 
/*
 * Poll the sysstatus register for reset completion. Some modules signal
 * reset done with the bits cleared instead of set, see
 * SYSS_QUIRK_RESETDONE_INVERTED. Returns 0 on completion, -ETIMEDOUT
 * otherwise.
 */
static int sysc_poll_reset_sysstatus(struct sysc *ddata)
{
	int error, retries;
	u32 syss_done, rstval;

	if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
		syss_done = 0;
	else
		syss_done = ddata->cfg.syss_mask;

	if (likely(!timekeeping_suspended)) {
		error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
				rstval, (rstval & ddata->cfg.syss_mask) ==
				syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
	} else {
		/* Timekeeping suspended, poll with udelay() instead */
		retries = MAX_MODULE_SOFTRESET_WAIT;
		while (retries--) {
			rstval = sysc_read_sysstatus(ddata);
			if ((rstval & ddata->cfg.syss_mask) == syss_done)
				return 0;
			udelay(2); /* Account for udelay flakeyness */
		}
		error = -ETIMEDOUT;
	}

	return error;
}
264 
/*
 * Poll the sysconfig register for the softreset bit to self-clear.
 * Used for modules that report reset status via sysconfig rather than
 * sysstatus. Returns 0 on completion, -ETIMEDOUT otherwise.
 */
static int sysc_poll_reset_sysconfig(struct sysc *ddata)
{
	int error, retries;
	u32 sysc_mask, rstval;

	sysc_mask = BIT(ddata->cap->regbits->srst_shift);

	if (likely(!timekeeping_suspended)) {
		error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
				rstval, !(rstval & sysc_mask),
				100, MAX_MODULE_SOFTRESET_WAIT);
	} else {
		/* Timekeeping suspended, poll with udelay() instead */
		retries = MAX_MODULE_SOFTRESET_WAIT;
		while (retries--) {
			rstval = sysc_read_sysconfig(ddata);
			if (!(rstval & sysc_mask))
				return 0;
			udelay(2); /* Account for udelay flakeyness */
		}
		error = -ETIMEDOUT;
	}

	return error;
}
289 
290 /* Poll on reset status */
291 static int sysc_wait_softreset(struct sysc *ddata)
292 {
293 	int syss_offset, error = 0;
294 
295 	if (ddata->cap->regbits->srst_shift < 0)
296 		return 0;
297 
298 	syss_offset = ddata->offsets[SYSC_SYSSTATUS];
299 
300 	if (syss_offset >= 0)
301 		error = sysc_poll_reset_sysstatus(ddata);
302 	else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS)
303 		error = sysc_poll_reset_sysconfig(ddata);
304 
305 	return error;
306 }
307 
/*
 * Register a clkdev alias for an external optional clock found on the
 * first available child node, so that later clk_get() with the role
 * name on this device succeeds. Returns 0 if the alias already exists
 * or was added, negative error code otherwise.
 */
static int sysc_add_named_clock_from_child(struct sysc *ddata,
					   const char *name,
					   const char *optfck_name)
{
	struct device_node *np = ddata->dev->of_node;
	struct device_node *child;
	struct clk_lookup *cl;
	struct clk *clock;
	const char *n;

	if (name)
		n = name;
	else
		n = optfck_name;

	/* Does the clock alias already exist? */
	clock = of_clk_get_by_name(np, n);
	if (!IS_ERR(clock)) {
		clk_put(clock);

		return 0;
	}

	/*
	 * NOTE(review): the reference taken on the child node does not
	 * appear to be dropped with of_node_put() — confirm if intentional.
	 */
	child = of_get_next_available_child(np, NULL);
	if (!child)
		return -ENODEV;

	clock = devm_get_clk_from_child(ddata->dev, child, name);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	/*
	 * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
	 * limit for clk_get(). If cl ever needs to be freed, it should be done
	 * with clkdev_drop().
	 */
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->con_id = n;
	cl->dev_id = dev_name(ddata->dev);
	cl->clk = clock;
	clkdev_add(cl);

	/* Drop the reference from devm_get_clk_from_child() */
	clk_put(clock);

	return 0;
}
357 
358 static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
359 {
360 	const char *optfck_name;
361 	int error, index;
362 
363 	if (ddata->nr_clocks < SYSC_OPTFCK0)
364 		index = SYSC_OPTFCK0;
365 	else
366 		index = ddata->nr_clocks;
367 
368 	if (name)
369 		optfck_name = name;
370 	else
371 		optfck_name = clock_names[index];
372 
373 	error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
374 	if (error)
375 		return error;
376 
377 	ddata->clock_roles[index] = optfck_name;
378 	ddata->nr_clocks++;
379 
380 	return 0;
381 }
382 
383 static int sysc_get_one_clock(struct sysc *ddata, const char *name)
384 {
385 	int error, i, index = -ENODEV;
386 
387 	if (!strncmp(clock_names[SYSC_FCK], name, 3))
388 		index = SYSC_FCK;
389 	else if (!strncmp(clock_names[SYSC_ICK], name, 3))
390 		index = SYSC_ICK;
391 
392 	if (index < 0) {
393 		for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
394 			if (!ddata->clocks[i]) {
395 				index = i;
396 				break;
397 			}
398 		}
399 	}
400 
401 	if (index < 0) {
402 		dev_err(ddata->dev, "clock %s not added\n", name);
403 		return index;
404 	}
405 
406 	ddata->clocks[index] = devm_clk_get(ddata->dev, name);
407 	if (IS_ERR(ddata->clocks[index])) {
408 		dev_err(ddata->dev, "clock get error for %s: %li\n",
409 			name, PTR_ERR(ddata->clocks[index]));
410 
411 		return PTR_ERR(ddata->clocks[index]);
412 	}
413 
414 	error = clk_prepare(ddata->clocks[index]);
415 	if (error) {
416 		dev_err(ddata->dev, "clock prepare error for %s: %i\n",
417 			name, error);
418 
419 		return error;
420 	}
421 
422 	return 0;
423 }
424 
425 static int sysc_get_clocks(struct sysc *ddata)
426 {
427 	struct device_node *np = ddata->dev->of_node;
428 	struct property *prop;
429 	const char *name;
430 	int nr_fck = 0, nr_ick = 0, i, error = 0;
431 
432 	ddata->clock_roles = devm_kcalloc(ddata->dev,
433 					  SYSC_MAX_CLOCKS,
434 					  sizeof(*ddata->clock_roles),
435 					  GFP_KERNEL);
436 	if (!ddata->clock_roles)
437 		return -ENOMEM;
438 
439 	of_property_for_each_string(np, "clock-names", prop, name) {
440 		if (!strncmp(clock_names[SYSC_FCK], name, 3))
441 			nr_fck++;
442 		if (!strncmp(clock_names[SYSC_ICK], name, 3))
443 			nr_ick++;
444 		ddata->clock_roles[ddata->nr_clocks] = name;
445 		ddata->nr_clocks++;
446 	}
447 
448 	if (ddata->nr_clocks < 1)
449 		return 0;
450 
451 	if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
452 		error = sysc_init_ext_opt_clock(ddata, NULL);
453 		if (error)
454 			return error;
455 	}
456 
457 	if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
458 		dev_err(ddata->dev, "too many clocks for %pOF\n", np);
459 
460 		return -EINVAL;
461 	}
462 
463 	if (nr_fck > 1 || nr_ick > 1) {
464 		dev_err(ddata->dev, "max one fck and ick for %pOF\n", np);
465 
466 		return -EINVAL;
467 	}
468 
469 	/* Always add a slot for main clocks fck and ick even if unused */
470 	if (!nr_fck)
471 		ddata->nr_clocks++;
472 	if (!nr_ick)
473 		ddata->nr_clocks++;
474 
475 	ddata->clocks = devm_kcalloc(ddata->dev,
476 				     ddata->nr_clocks, sizeof(*ddata->clocks),
477 				     GFP_KERNEL);
478 	if (!ddata->clocks)
479 		return -ENOMEM;
480 
481 	for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
482 		const char *name = ddata->clock_roles[i];
483 
484 		if (!name)
485 			continue;
486 
487 		error = sysc_get_one_clock(ddata, name);
488 		if (error)
489 			return error;
490 	}
491 
492 	return 0;
493 }
494 
495 static int sysc_enable_main_clocks(struct sysc *ddata)
496 {
497 	struct clk *clock;
498 	int i, error;
499 
500 	if (!ddata->clocks)
501 		return 0;
502 
503 	for (i = 0; i < SYSC_OPTFCK0; i++) {
504 		clock = ddata->clocks[i];
505 
506 		/* Main clocks may not have ick */
507 		if (IS_ERR_OR_NULL(clock))
508 			continue;
509 
510 		error = clk_enable(clock);
511 		if (error)
512 			goto err_disable;
513 	}
514 
515 	return 0;
516 
517 err_disable:
518 	for (i--; i >= 0; i--) {
519 		clock = ddata->clocks[i];
520 
521 		/* Main clocks may not have ick */
522 		if (IS_ERR_OR_NULL(clock))
523 			continue;
524 
525 		clk_disable(clock);
526 	}
527 
528 	return error;
529 }
530 
531 static void sysc_disable_main_clocks(struct sysc *ddata)
532 {
533 	struct clk *clock;
534 	int i;
535 
536 	if (!ddata->clocks)
537 		return;
538 
539 	for (i = 0; i < SYSC_OPTFCK0; i++) {
540 		clock = ddata->clocks[i];
541 		if (IS_ERR_OR_NULL(clock))
542 			continue;
543 
544 		clk_disable(clock);
545 	}
546 }
547 
548 static int sysc_enable_opt_clocks(struct sysc *ddata)
549 {
550 	struct clk *clock;
551 	int i, error;
552 
553 	if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
554 		return 0;
555 
556 	for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
557 		clock = ddata->clocks[i];
558 
559 		/* Assume no holes for opt clocks */
560 		if (IS_ERR_OR_NULL(clock))
561 			return 0;
562 
563 		error = clk_enable(clock);
564 		if (error)
565 			goto err_disable;
566 	}
567 
568 	return 0;
569 
570 err_disable:
571 	for (i--; i >= 0; i--) {
572 		clock = ddata->clocks[i];
573 		if (IS_ERR_OR_NULL(clock))
574 			continue;
575 
576 		clk_disable(clock);
577 	}
578 
579 	return error;
580 }
581 
582 static void sysc_disable_opt_clocks(struct sysc *ddata)
583 {
584 	struct clk *clock;
585 	int i;
586 
587 	if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
588 		return;
589 
590 	for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
591 		clock = ddata->clocks[i];
592 
593 		/* Assume no holes for opt clocks */
594 		if (IS_ERR_OR_NULL(clock))
595 			return;
596 
597 		clk_disable(clock);
598 	}
599 }
600 
601 static void sysc_clkdm_deny_idle(struct sysc *ddata)
602 {
603 	struct ti_sysc_platform_data *pdata;
604 
605 	if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
606 		return;
607 
608 	pdata = dev_get_platdata(ddata->dev);
609 	if (pdata && pdata->clkdm_deny_idle)
610 		pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie);
611 }
612 
613 static void sysc_clkdm_allow_idle(struct sysc *ddata)
614 {
615 	struct ti_sysc_platform_data *pdata;
616 
617 	if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
618 		return;
619 
620 	pdata = dev_get_platdata(ddata->dev);
621 	if (pdata && pdata->clkdm_allow_idle)
622 		pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie);
623 }
624 
625 /**
626  * sysc_init_resets - init rstctrl reset line if configured
627  * @ddata: device driver data
628  *
629  * See sysc_rstctrl_reset_deassert().
630  */
631 static int sysc_init_resets(struct sysc *ddata)
632 {
633 	ddata->rsts =
634 		devm_reset_control_get_optional_shared(ddata->dev, "rstctrl");
635 
636 	return PTR_ERR_OR_ZERO(ddata->rsts);
637 }
638 
639 /**
640  * sysc_parse_and_check_child_range - parses module IO region from ranges
641  * @ddata: device driver data
642  *
643  * In general we only need rev, syss, and sysc registers and not the whole
644  * module range. But we do want the offsets for these registers from the
645  * module base. This allows us to check them against the legacy hwmod
646  * platform data. Let's also check the ranges are configured properly.
647  */
648 static int sysc_parse_and_check_child_range(struct sysc *ddata)
649 {
650 	struct device_node *np = ddata->dev->of_node;
651 	struct of_range_parser parser;
652 	struct of_range range;
653 	int error;
654 
655 	error = of_range_parser_init(&parser, np);
656 	if (error)
657 		return error;
658 
659 	for_each_of_range(&parser, &range) {
660 		ddata->module_pa = range.cpu_addr;
661 		ddata->module_size = range.size;
662 		break;
663 	}
664 
665 	return 0;
666 }
667 
/*
 * Interconnect instances to probe before l4_per instances, used by
 * sysc_defer_non_critical() to detect the critical buses by address.
 */
static struct resource early_bus_ranges[] = {
	/* am3/4 l4_wkup */
	{ .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
	/* omap4/5 and dra7 l4_cfg */
	{ .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
	/* omap4 l4_wkup */
	{ .start = 0x4a300000, .end = 0x4a300000 + 0x30000,  },
	/* omap5 and dra7 l4_wkup without dra7 dcan segment */
	{ .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000,  },
};
679 
/* Probe deferral budget for non-critical modules, see sysc_defer_non_critical() */
static atomic_t sysc_defer = ATOMIC_INIT(10);
681 
682 /**
683  * sysc_defer_non_critical - defer non_critical interconnect probing
684  * @ddata: device driver data
685  *
686  * We want to probe l4_cfg and l4_wkup interconnect instances before any
687  * l4_per instances as l4_per instances depend on resources on l4_cfg and
688  * l4_wkup interconnects.
689  */
690 static int sysc_defer_non_critical(struct sysc *ddata)
691 {
692 	struct resource *res;
693 	int i;
694 
695 	if (!atomic_read(&sysc_defer))
696 		return 0;
697 
698 	for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
699 		res = &early_bus_ranges[i];
700 		if (ddata->module_pa >= res->start &&
701 		    ddata->module_pa <= res->end) {
702 			atomic_set(&sysc_defer, 0);
703 
704 			return 0;
705 		}
706 	}
707 
708 	atomic_dec_if_positive(&sysc_defer);
709 
710 	return -EPROBE_DEFER;
711 }
712 
/* Cached console node, ERR_PTR(-ENODEV) on failed lookup, see sysc_init_stdout_path() */
static struct device_node *stdout_path;
714 
/*
 * Resolve the console device_node from the chosen node stdout-path
 * property. The result is cached in stdout_path; a failed lookup is
 * remembered as ERR_PTR(-ENODEV) so it is only attempted once.
 */
static void sysc_init_stdout_path(struct sysc *ddata)
{
	struct device_node *np = NULL;
	const char *uart;

	/* Previous lookup failed, don't retry */
	if (IS_ERR(stdout_path))
		return;

	/* Already resolved */
	if (stdout_path)
		return;

	np = of_find_node_by_path("/chosen");
	if (!np)
		goto err;

	uart = of_get_property(np, "stdout-path", NULL);
	if (!uart)
		goto err;

	/*
	 * NOTE(review): the reference on the /chosen node from above looks
	 * like it is never dropped with of_node_put() — confirm intent.
	 */
	np = of_find_node_by_path(uart);
	if (!np)
		goto err;

	stdout_path = np;

	return;

err:
	stdout_path = ERR_PTR(-ENODEV);
}
745 
746 static void sysc_check_quirk_stdout(struct sysc *ddata,
747 				    struct device_node *np)
748 {
749 	sysc_init_stdout_path(ddata);
750 	if (np != stdout_path)
751 		return;
752 
753 	ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT |
754 				SYSC_QUIRK_NO_RESET_ON_INIT;
755 }
756 
757 /**
758  * sysc_check_one_child - check child configuration
759  * @ddata: device driver data
760  * @np: child device node
761  *
762  * Let's avoid messy situations where we have new interconnect target
763  * node but children have "ti,hwmods". These belong to the interconnect
764  * target node and are managed by this driver.
765  */
766 static void sysc_check_one_child(struct sysc *ddata,
767 				 struct device_node *np)
768 {
769 	const char *name;
770 
771 	name = of_get_property(np, "ti,hwmods", NULL);
772 	if (name && !of_device_is_compatible(np, "ti,sysc"))
773 		dev_warn(ddata->dev, "really a child ti,hwmods property?");
774 
775 	sysc_check_quirk_stdout(ddata, np);
776 	sysc_parse_dts_quirks(ddata, np, true);
777 }
778 
779 static void sysc_check_children(struct sysc *ddata)
780 {
781 	struct device_node *child;
782 
783 	for_each_child_of_node(ddata->dev->of_node, child)
784 		sysc_check_one_child(ddata, child);
785 }
786 
787 /*
788  * So far only I2C uses 16-bit read access with clockactivity with revision
789  * in two registers with stride of 4. We can detect this based on the rev
790  * register size to configure things far enough to be able to properly read
791  * the revision register.
792  */
793 static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res)
794 {
795 	if (resource_size(res) == 8)
796 		ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT;
797 }
798 
799 /**
800  * sysc_parse_one - parses the interconnect target module registers
801  * @ddata: device driver data
802  * @reg: register to parse
803  */
804 static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
805 {
806 	struct resource *res;
807 	const char *name;
808 
809 	switch (reg) {
810 	case SYSC_REVISION:
811 	case SYSC_SYSCONFIG:
812 	case SYSC_SYSSTATUS:
813 		name = reg_names[reg];
814 		break;
815 	default:
816 		return -EINVAL;
817 	}
818 
819 	res = platform_get_resource_byname(to_platform_device(ddata->dev),
820 					   IORESOURCE_MEM, name);
821 	if (!res) {
822 		ddata->offsets[reg] = -ENODEV;
823 
824 		return 0;
825 	}
826 
827 	ddata->offsets[reg] = res->start - ddata->module_pa;
828 	if (reg == SYSC_REVISION)
829 		sysc_check_quirk_16bit(ddata, res);
830 
831 	return 0;
832 }
833 
834 static int sysc_parse_registers(struct sysc *ddata)
835 {
836 	int i, error;
837 
838 	for (i = 0; i < SYSC_MAX_REGS; i++) {
839 		error = sysc_parse_one(ddata, i);
840 		if (error)
841 			return error;
842 	}
843 
844 	return 0;
845 }
846 
847 /**
848  * sysc_check_registers - check for misconfigured register overlaps
849  * @ddata: device driver data
850  */
851 static int sysc_check_registers(struct sysc *ddata)
852 {
853 	int i, j, nr_regs = 0, nr_matches = 0;
854 
855 	for (i = 0; i < SYSC_MAX_REGS; i++) {
856 		if (ddata->offsets[i] < 0)
857 			continue;
858 
859 		if (ddata->offsets[i] > (ddata->module_size - 4)) {
860 			dev_err(ddata->dev, "register outside module range");
861 
862 				return -EINVAL;
863 		}
864 
865 		for (j = 0; j < SYSC_MAX_REGS; j++) {
866 			if (ddata->offsets[j] < 0)
867 				continue;
868 
869 			if (ddata->offsets[i] == ddata->offsets[j])
870 				nr_matches++;
871 		}
872 		nr_regs++;
873 	}
874 
875 	if (nr_matches > nr_regs) {
876 		dev_err(ddata->dev, "overlapping registers: (%i/%i)",
877 			nr_regs, nr_matches);
878 
879 		return -EINVAL;
880 	}
881 
882 	return 0;
883 }
884 
885 /**
886  * sysc_ioremap - ioremap register space for the interconnect target module
887  * @ddata: device driver data
888  *
889  * Note that the interconnect target module registers can be anywhere
890  * within the interconnect target module range. For example, SGX has
891  * them at offset 0x1fc00 in the 32MB module address space. And cpsw
892  * has them at offset 0x1200 in the CPSW_WR child. Usually the
893  * interconnect target module registers are at the beginning of
894  * the module range though.
895  */
896 static int sysc_ioremap(struct sysc *ddata)
897 {
898 	int size;
899 
900 	if (ddata->offsets[SYSC_REVISION] < 0 &&
901 	    ddata->offsets[SYSC_SYSCONFIG] < 0 &&
902 	    ddata->offsets[SYSC_SYSSTATUS] < 0) {
903 		size = ddata->module_size;
904 	} else {
905 		size = max3(ddata->offsets[SYSC_REVISION],
906 			    ddata->offsets[SYSC_SYSCONFIG],
907 			    ddata->offsets[SYSC_SYSSTATUS]);
908 
909 		if (size < SZ_1K)
910 			size = SZ_1K;
911 
912 		if ((size + sizeof(u32)) > ddata->module_size)
913 			size = ddata->module_size;
914 	}
915 
916 	ddata->module_va = devm_ioremap(ddata->dev,
917 					ddata->module_pa,
918 					size + sizeof(u32));
919 	if (!ddata->module_va)
920 		return -EIO;
921 
922 	return 0;
923 }
924 
925 /**
926  * sysc_map_and_check_registers - ioremap and check device registers
927  * @ddata: device driver data
928  */
929 static int sysc_map_and_check_registers(struct sysc *ddata)
930 {
931 	struct device_node *np = ddata->dev->of_node;
932 	int error;
933 
934 	error = sysc_parse_and_check_child_range(ddata);
935 	if (error)
936 		return error;
937 
938 	error = sysc_defer_non_critical(ddata);
939 	if (error)
940 		return error;
941 
942 	sysc_check_children(ddata);
943 
944 	if (!of_property_present(np, "reg"))
945 		return 0;
946 
947 	error = sysc_parse_registers(ddata);
948 	if (error)
949 		return error;
950 
951 	error = sysc_ioremap(ddata);
952 	if (error)
953 		return error;
954 
955 	error = sysc_check_registers(ddata);
956 	if (error)
957 		return error;
958 
959 	return 0;
960 }
961 
962 /**
963  * sysc_show_rev - read and show interconnect target module revision
964  * @bufp: buffer to print the information to
965  * @ddata: device driver data
966  */
967 static int sysc_show_rev(char *bufp, struct sysc *ddata)
968 {
969 	int len;
970 
971 	if (ddata->offsets[SYSC_REVISION] < 0)
972 		return sprintf(bufp, ":NA");
973 
974 	len = sprintf(bufp, ":%08x", ddata->revision);
975 
976 	return len;
977 }
978 
979 static int sysc_show_reg(struct sysc *ddata,
980 			 char *bufp, enum sysc_registers reg)
981 {
982 	if (ddata->offsets[reg] < 0)
983 		return sprintf(bufp, ":NA");
984 
985 	return sprintf(bufp, ":%x", ddata->offsets[reg]);
986 }
987 
988 static int sysc_show_name(char *bufp, struct sysc *ddata)
989 {
990 	if (!ddata->name)
991 		return 0;
992 
993 	return sprintf(bufp, ":%s", ddata->name);
994 }
995 
/**
 * sysc_show_registers - show information about interconnect target module
 * @ddata: device driver data
 *
 * Prints a debug line with the module base address, size, register
 * offsets, revision and name. Assumes the short ":%x" style entries
 * from the helpers fit the 128 byte buffer.
 */
static void sysc_show_registers(struct sysc *ddata)
{
	char buf[128];
	char *bufp = buf;
	int i;

	/* Each helper returns the number of characters it wrote */
	for (i = 0; i < SYSC_MAX_REGS; i++)
		bufp += sysc_show_reg(ddata, bufp, i);

	bufp += sysc_show_rev(bufp, ddata);
	bufp += sysc_show_name(bufp, ddata);

	dev_dbg(ddata->dev, "%llx:%x%s\n",
		ddata->module_pa, ddata->module_size,
		buf);
}
1016 
/**
 * sysc_write_sysconfig - handle sysconfig quirks for register write
 * @ddata: device driver data
 * @value: register value
 *
 * Some modules protect the sysconfig register, so bracket the write
 * with the module specific unlock and lock quirk handlers when set.
 */
static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
{
	if (ddata->module_unlock_quirk)
		ddata->module_unlock_quirk(ddata);

	sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);

	if (ddata->module_lock_quirk)
		ddata->module_lock_quirk(ddata);
}
1032 
/* Mask covering all idle mode values in a sidle/midle field */
#define SYSC_IDLE_MASK	(SYSC_NR_IDLEMODES - 1)
/* CLOCKACTIVITY value keeping the interface clock active */
#define SYSC_CLOCACT_ICK	2

/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
static int sysc_enable_module(struct device *dev)
{
	struct sysc *ddata;
	const struct sysc_regbits *regbits;
	u32 reg, idlemodes, best_mode;
	int error;

	ddata = dev_get_drvdata(dev);

	/*
	 * Some modules like DSS reset automatically on idle. Enable optional
	 * reset clocks and wait for OCP softreset to complete.
	 */
	if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
		error = sysc_enable_opt_clocks(ddata);
		if (error) {
			dev_err(ddata->dev,
				"Optional clocks failed for enable: %i\n",
				error);
			return error;
		}
	}
	/*
	 * Some modules like i2c and hdq1w have unusable reset status unless
	 * the module reset quirk is enabled. Skip status check on enable.
	 */
	if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
		error = sysc_wait_softreset(ddata);
		if (error)
			dev_warn(ddata->dev, "OCP softreset timed out\n");
	}
	if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
		sysc_disable_opt_clocks(ddata);

	/*
	 * Some subsystem private interconnects, like DSS top level module,
	 * need only the automatic OCP softreset handling with no sysconfig
	 * register bits to configure.
	 */
	if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
		return 0;

	regbits = ddata->cap->regbits;
	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	/*
	 * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
	 * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
	 * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
	 */
	if (regbits->clkact_shift >= 0 &&
	    (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
		reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;

	/* Set SIDLE mode */
	idlemodes = ddata->cfg.sidlemodes;
	if (!idlemodes || regbits->sidle_shift < 0)
		goto set_midle;

	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
				 SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
		/* Software supervised idle, keep the module awake */
		best_mode = SYSC_IDLE_NO;
	} else {
		/* Pick the highest supported idle mode */
		best_mode = fls(ddata->cfg.sidlemodes) - 1;
		if (best_mode > SYSC_IDLE_MASK) {
			dev_err(dev, "%s: invalid sidlemode\n", __func__);
			return -EINVAL;
		}

		/* Set WAKEUP */
		if (regbits->enwkup_shift >= 0 &&
		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
			reg |= BIT(regbits->enwkup_shift);
	}

	reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
	reg |= best_mode << regbits->sidle_shift;
	sysc_write_sysconfig(ddata, reg);

set_midle:
	/* Set MIDLE mode */
	idlemodes = ddata->cfg.midlemodes;
	if (!idlemodes || regbits->midle_shift < 0)
		goto set_autoidle;

	best_mode = fls(ddata->cfg.midlemodes) - 1;
	if (best_mode > SYSC_IDLE_MASK) {
		dev_err(dev, "%s: invalid midlemode\n", __func__);
		/* Still save context below with the error set */
		error = -EINVAL;
		goto save_context;
	}

	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
		best_mode = SYSC_IDLE_NO;

	reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
	reg |= best_mode << regbits->midle_shift;
	sysc_write_sysconfig(ddata, reg);

set_autoidle:
	/* Autoidle bit must enabled separately if available */
	if (regbits->autoidle_shift >= 0 &&
	    ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
		reg |= 1 << regbits->autoidle_shift;
		sysc_write_sysconfig(ddata, reg);
	}

	error = 0;

save_context:
	/* Save context and flush posted write */
	ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	if (ddata->module_enable_quirk)
		ddata->module_enable_quirk(ddata);

	return error;
}
1155 
1156 static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
1157 {
1158 	if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
1159 		*best_mode = SYSC_IDLE_SMART_WKUP;
1160 	else if (idlemodes & BIT(SYSC_IDLE_SMART))
1161 		*best_mode = SYSC_IDLE_SMART;
1162 	else if (idlemodes & BIT(SYSC_IDLE_FORCE))
1163 		*best_mode = SYSC_IDLE_FORCE;
1164 	else
1165 		return -EINVAL;
1166 
1167 	return 0;
1168 }
1169 
1170 /* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
1171 static int sysc_disable_module(struct device *dev)
1172 {
1173 	struct sysc *ddata;
1174 	const struct sysc_regbits *regbits;
1175 	u32 reg, idlemodes, best_mode;
1176 	int ret;
1177 
1178 	ddata = dev_get_drvdata(dev);
1179 	if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
1180 		return 0;
1181 
1182 	if (ddata->module_disable_quirk)
1183 		ddata->module_disable_quirk(ddata);
1184 
1185 	regbits = ddata->cap->regbits;
1186 	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1187 
1188 	/* Set MIDLE mode */
1189 	idlemodes = ddata->cfg.midlemodes;
1190 	if (!idlemodes || regbits->midle_shift < 0)
1191 		goto set_sidle;
1192 
1193 	ret = sysc_best_idle_mode(idlemodes, &best_mode);
1194 	if (ret) {
1195 		dev_err(dev, "%s: invalid midlemode\n", __func__);
1196 		return ret;
1197 	}
1198 
1199 	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) ||
1200 	    ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY))
1201 		best_mode = SYSC_IDLE_FORCE;
1202 
1203 	reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
1204 	reg |= best_mode << regbits->midle_shift;
1205 	sysc_write_sysconfig(ddata, reg);
1206 
1207 set_sidle:
1208 	/* Set SIDLE mode */
1209 	idlemodes = ddata->cfg.sidlemodes;
1210 	if (!idlemodes || regbits->sidle_shift < 0) {
1211 		ret = 0;
1212 		goto save_context;
1213 	}
1214 
1215 	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
1216 		best_mode = SYSC_IDLE_FORCE;
1217 	} else {
1218 		ret = sysc_best_idle_mode(idlemodes, &best_mode);
1219 		if (ret) {
1220 			dev_err(dev, "%s: invalid sidlemode\n", __func__);
1221 			ret = -EINVAL;
1222 			goto save_context;
1223 		}
1224 	}
1225 
1226 	reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
1227 	reg |= best_mode << regbits->sidle_shift;
1228 	if (regbits->autoidle_shift >= 0 &&
1229 	    ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
1230 		reg |= 1 << regbits->autoidle_shift;
1231 	sysc_write_sysconfig(ddata, reg);
1232 
1233 	ret = 0;
1234 
1235 save_context:
1236 	/* Save context and flush posted write */
1237 	ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1238 
1239 	return ret;
1240 }
1241 
1242 static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
1243 						      struct sysc *ddata)
1244 {
1245 	struct ti_sysc_platform_data *pdata;
1246 	int error;
1247 
1248 	pdata = dev_get_platdata(ddata->dev);
1249 	if (!pdata)
1250 		return 0;
1251 
1252 	if (!pdata->idle_module)
1253 		return -ENODEV;
1254 
1255 	error = pdata->idle_module(dev, &ddata->cookie);
1256 	if (error)
1257 		dev_err(dev, "%s: could not idle: %i\n",
1258 			__func__, error);
1259 
1260 	reset_control_assert(ddata->rsts);
1261 
1262 	return 0;
1263 }
1264 
1265 static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
1266 						     struct sysc *ddata)
1267 {
1268 	struct ti_sysc_platform_data *pdata;
1269 	int error;
1270 
1271 	pdata = dev_get_platdata(ddata->dev);
1272 	if (!pdata)
1273 		return 0;
1274 
1275 	if (!pdata->enable_module)
1276 		return -ENODEV;
1277 
1278 	error = pdata->enable_module(dev, &ddata->cookie);
1279 	if (error)
1280 		dev_err(dev, "%s: could not enable: %i\n",
1281 			__func__, error);
1282 
1283 	reset_control_deassert(ddata->rsts);
1284 
1285 	return 0;
1286 }
1287 
1288 static int __maybe_unused sysc_runtime_suspend(struct device *dev)
1289 {
1290 	struct sysc *ddata;
1291 	int error = 0;
1292 
1293 	ddata = dev_get_drvdata(dev);
1294 
1295 	if (!ddata->enabled)
1296 		return 0;
1297 
1298 	sysc_clkdm_deny_idle(ddata);
1299 
1300 	if (ddata->legacy_mode) {
1301 		error = sysc_runtime_suspend_legacy(dev, ddata);
1302 		if (error)
1303 			goto err_allow_idle;
1304 	} else {
1305 		error = sysc_disable_module(dev);
1306 		if (error)
1307 			goto err_allow_idle;
1308 	}
1309 
1310 	sysc_disable_main_clocks(ddata);
1311 
1312 	if (sysc_opt_clks_needed(ddata))
1313 		sysc_disable_opt_clocks(ddata);
1314 
1315 	ddata->enabled = false;
1316 
1317 err_allow_idle:
1318 	sysc_clkdm_allow_idle(ddata);
1319 
1320 	reset_control_assert(ddata->rsts);
1321 
1322 	return error;
1323 }
1324 
1325 static int __maybe_unused sysc_runtime_resume(struct device *dev)
1326 {
1327 	struct sysc *ddata;
1328 	int error = 0;
1329 
1330 	ddata = dev_get_drvdata(dev);
1331 
1332 	if (ddata->enabled)
1333 		return 0;
1334 
1335 
1336 	sysc_clkdm_deny_idle(ddata);
1337 
1338 	if (sysc_opt_clks_needed(ddata)) {
1339 		error = sysc_enable_opt_clocks(ddata);
1340 		if (error)
1341 			goto err_allow_idle;
1342 	}
1343 
1344 	error = sysc_enable_main_clocks(ddata);
1345 	if (error)
1346 		goto err_opt_clocks;
1347 
1348 	reset_control_deassert(ddata->rsts);
1349 
1350 	if (ddata->legacy_mode) {
1351 		error = sysc_runtime_resume_legacy(dev, ddata);
1352 		if (error)
1353 			goto err_main_clocks;
1354 	} else {
1355 		error = sysc_enable_module(dev);
1356 		if (error)
1357 			goto err_main_clocks;
1358 	}
1359 
1360 	ddata->enabled = true;
1361 
1362 	sysc_clkdm_allow_idle(ddata);
1363 
1364 	return 0;
1365 
1366 err_main_clocks:
1367 	sysc_disable_main_clocks(ddata);
1368 err_opt_clocks:
1369 	if (sysc_opt_clks_needed(ddata))
1370 		sysc_disable_opt_clocks(ddata);
1371 err_allow_idle:
1372 	sysc_clkdm_allow_idle(ddata);
1373 
1374 	return error;
1375 }
1376 
1377 /*
1378  * Checks if device context was lost. Assumes the sysconfig register value
1379  * after lost context is different from the configured value. Only works for
1380  * enabled devices.
1381  *
1382  * Eventually we may want to also add support to using the context lost
1383  * registers that some SoCs have.
1384  */
1385 static int sysc_check_context(struct sysc *ddata)
1386 {
1387 	u32 reg;
1388 
1389 	if (!ddata->enabled)
1390 		return -ENODATA;
1391 
1392 	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1393 	if (reg == ddata->sysconfig)
1394 		return 0;
1395 
1396 	return -EACCES;
1397 }
1398 
/*
 * Re-initialize a module whose context may have been lost, typically on
 * resume paths. Cycles the module through suspend and resume to rewrite
 * its configuration, and for modules with SYSC_QUIRK_RESET_ON_CTX_LOST
 * also resets the module and restores the saved SYSCONFIG value. The
 * module is left enabled only if leave_enabled is set.
 */
static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
{
	struct device *dev = ddata->dev;
	int error;

	if (ddata->enabled) {
		/* Nothing to do if enabled and context not lost */
		error = sysc_check_context(ddata);
		if (!error)
			return 0;

		/* Disable target module if it is enabled */
		error = sysc_runtime_suspend(dev);
		if (error)
			dev_warn(dev, "reinit suspend failed: %i\n", error);
	}

	/* Enable target module */
	error = sysc_runtime_resume(dev);
	if (error)
		dev_warn(dev, "reinit resume failed: %i\n", error);

	/* Some modules like am335x gpmc need reset and restore of sysconfig */
	if (ddata->cfg.quirks & SYSC_QUIRK_RESET_ON_CTX_LOST) {
		error = sysc_reset(ddata);
		if (error)
			dev_warn(dev, "reinit reset failed: %i\n", error);

		/* Restore the SYSCONFIG value saved on the last enable */
		sysc_write_sysconfig(ddata, ddata->sysconfig);
	}

	if (leave_enabled)
		return error;

	/* Disable target module if no leave_enabled was set */
	error = sysc_runtime_suspend(dev);
	if (error)
		dev_warn(dev, "reinit suspend failed: %i\n", error);

	return error;
}
1440 
1441 static int __maybe_unused sysc_noirq_suspend(struct device *dev)
1442 {
1443 	struct sysc *ddata;
1444 
1445 	ddata = dev_get_drvdata(dev);
1446 
1447 	if (ddata->cfg.quirks &
1448 	    (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
1449 		return 0;
1450 
1451 	if (!ddata->enabled)
1452 		return 0;
1453 
1454 	ddata->needs_resume = 1;
1455 
1456 	return sysc_runtime_suspend(dev);
1457 }
1458 
1459 static int __maybe_unused sysc_noirq_resume(struct device *dev)
1460 {
1461 	struct sysc *ddata;
1462 	int error = 0;
1463 
1464 	ddata = dev_get_drvdata(dev);
1465 
1466 	if (ddata->cfg.quirks &
1467 	    (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
1468 		return 0;
1469 
1470 	if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
1471 		error = sysc_reinit_module(ddata, ddata->needs_resume);
1472 		if (error)
1473 			dev_warn(dev, "noirq_resume failed: %i\n", error);
1474 	} else if (ddata->needs_resume) {
1475 		error = sysc_runtime_resume(dev);
1476 		if (error)
1477 			dev_warn(dev, "noirq_resume failed: %i\n", error);
1478 	}
1479 
1480 	ddata->needs_resume = 0;
1481 
1482 	return error;
1483 }
1484 
/* Runtime PM plus noirq system sleep callbacks for the target module */
static const struct dev_pm_ops sysc_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_noirq_suspend, sysc_noirq_resume)
	SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
			   sysc_runtime_resume,
			   NULL)
};
1491 
/* Module revision register based quirks */
struct sysc_revision_quirk {
	const char *name;	/* module name to set on match */
	u32 base;		/* module physical base, 0 matches any base */
	int rev_offset;		/* revision register offset, -ENODEV if none */
	int sysc_offset;	/* sysconfig register offset, -ENODEV if none */
	int syss_offset;	/* sysstatus register offset, -ENODEV if none */
	u32 revision;		/* expected revision register value */
	u32 revision_mask;	/* mask applied to revision when matching */
	u32 quirks;		/* quirk flags to set on match */
};
1503 
/*
 * Initializer for one sysc_revision_quirk table entry. Register offsets
 * use -ENODEV when the module does not have that register.
 */
#define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss,		\
		   optrev_val, optrevmask, optquirkmask)		\
	{								\
		.name = (optname),					\
		.base = (optbase),					\
		.rev_offset = (optrev),					\
		.sysc_offset = (optsysc),				\
		.syss_offset = (optsyss),				\
		.revision = (optrev_val),				\
		.revision_mask = (optrevmask),				\
		.quirks = (optquirkmask),				\
	}
1516 
1517 static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1518 	/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
1519 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
1520 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
1521 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
1522 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
1523 	/* Uarts on omap4 and later */
1524 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
1525 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
1526 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
1527 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
1528 
1529 	/* Quirks that need to be set based on the module address */
1530 	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
1531 		   SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
1532 		   SYSC_QUIRK_SWSUP_SIDLE),
1533 
1534 	/* Quirks that need to be set based on detected module */
1535 	SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
1536 		   SYSC_MODULE_QUIRK_AESS),
1537 	/* Errata i893 handling for dra7 dcan1 and 2 */
1538 	SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
1539 		   SYSC_QUIRK_CLKDM_NOAUTO),
1540 	SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
1541 		   SYSC_QUIRK_CLKDM_NOAUTO),
1542 	SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
1543 		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
1544 	SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
1545 		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
1546 	SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
1547 		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
1548 	SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
1549 		   SYSC_QUIRK_CLKDM_NOAUTO),
1550 	SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
1551 		   SYSC_QUIRK_CLKDM_NOAUTO),
1552 	SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
1553 		   SYSC_QUIRK_OPT_CLKS_IN_RESET),
1554 	SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
1555 		   SYSC_QUIRK_REINIT_ON_CTX_LOST | SYSC_QUIRK_RESET_ON_CTX_LOST |
1556 		   SYSC_QUIRK_GPMC_DEBUG),
1557 	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
1558 		   SYSC_QUIRK_OPT_CLKS_NEEDED),
1559 	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
1560 		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1561 	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
1562 		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1563 	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
1564 		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1565 	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
1566 		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1567 	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
1568 		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1569 	SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
1570 		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
1571 	SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
1572 	SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
1573 		   SYSC_MODULE_QUIRK_SGX),
1574 	SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
1575 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1576 	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff,
1577 		   SYSC_QUIRK_SWSUP_SIDLE),
1578 	SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
1579 		   SYSC_MODULE_QUIRK_RTC_UNLOCK),
1580 	SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
1581 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1582 	SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
1583 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1584 	SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
1585 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1586 	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
1587 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1588 	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
1589 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
1590 	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000033,
1591 		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
1592 		   SYSC_MODULE_QUIRK_OTG),
1593 	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000040,
1594 		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
1595 		   SYSC_MODULE_QUIRK_OTG),
1596 	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
1597 		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
1598 		   SYSC_MODULE_QUIRK_OTG),
1599 	SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
1600 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
1601 		   SYSC_QUIRK_REINIT_ON_CTX_LOST),
1602 	SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
1603 		   SYSC_MODULE_QUIRK_WDT),
1604 	/* PRUSS on am3, am4 and am5 */
1605 	SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
1606 		   SYSC_MODULE_QUIRK_PRUSS),
1607 	/* Watchdog on am3 and am4 */
1608 	SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
1609 		   SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
1610 
1611 #ifdef DEBUG
1612 	SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
1613 	SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
1614 	SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
1615 	SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
1616 	SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
1617 		   0xffff00f0, 0),
1618 	SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
1619 	SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
1620 	SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
1621 	SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
1622 	SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
1623 	SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
1624 	SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
1625 	SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
1626 	SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
1627 	SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
1628 	SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
1629 	SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
1630 	SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
1631 	SYSC_QUIRK("elm", 0x48080000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
1632 	SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x40441403, 0xffff0fff, 0),
1633 	SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x50440500, 0xffffffff, 0),
1634 	SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
1635 	SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
1636 	SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0),
1637 	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
1638 	SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
1639 	SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
1640 	SYSC_QUIRK("keypad", 0x4a31c000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
1641 	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
1642 	SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
1643 	SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
1644 	SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
1645 	SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
1646 	SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
1647 	SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
1648 	SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
1649 	SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
1650 	SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
1651 	SYSC_QUIRK("pcie", 0x51000000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
1652 	SYSC_QUIRK("pcie", 0x51800000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
1653 	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
1654 	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
1655 	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
1656 	SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
1657 	SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
1658 	SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
1659 	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
1660 	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
1661 	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
1662 	SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
1663 	SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
1664 	SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
1665 	SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
1666 	SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 0),
1667 	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
1668 	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
1669 	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 0),
1670 	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, 0),
1671 	SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
1672 	SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
1673 	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
1674 	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
1675 	/* Some timers on omap4 and later */
1676 	SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
1677 	SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
1678 	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
1679 	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
1680 	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
1681 	SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
1682 	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
1683 	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
1684 	SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
1685 	SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
1686 #endif
1687 };
1688 
1689 /*
1690  * Early quirks based on module base and register offsets only that are
1691  * needed before the module revision can be read
1692  */
1693 static void sysc_init_early_quirks(struct sysc *ddata)
1694 {
1695 	const struct sysc_revision_quirk *q;
1696 	int i;
1697 
1698 	for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
1699 		q = &sysc_revision_quirks[i];
1700 
1701 		if (!q->base)
1702 			continue;
1703 
1704 		if (q->base != ddata->module_pa)
1705 			continue;
1706 
1707 		if (q->rev_offset != ddata->offsets[SYSC_REVISION])
1708 			continue;
1709 
1710 		if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
1711 			continue;
1712 
1713 		if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
1714 			continue;
1715 
1716 		ddata->name = q->name;
1717 		ddata->cfg.quirks |= q->quirks;
1718 	}
1719 }
1720 
1721 /* Quirks that also consider the revision register value */
1722 static void sysc_init_revision_quirks(struct sysc *ddata)
1723 {
1724 	const struct sysc_revision_quirk *q;
1725 	int i;
1726 
1727 	for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
1728 		q = &sysc_revision_quirks[i];
1729 
1730 		if (q->base && q->base != ddata->module_pa)
1731 			continue;
1732 
1733 		if (q->rev_offset != ddata->offsets[SYSC_REVISION])
1734 			continue;
1735 
1736 		if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
1737 			continue;
1738 
1739 		if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
1740 			continue;
1741 
1742 		if (q->revision == ddata->revision ||
1743 		    (q->revision & q->revision_mask) ==
1744 		    (ddata->revision & q->revision_mask)) {
1745 			ddata->name = q->name;
1746 			ddata->cfg.quirks |= q->quirks;
1747 		}
1748 	}
1749 }
1750 
1751 /*
1752  * DSS needs dispc outputs disabled to reset modules. Returns mask of
1753  * enabled DSS interrupts. Eventually we may be able to do this on
1754  * dispc init rather than top-level DSS init.
1755  */
1756 static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
1757 			    bool disable)
1758 {
1759 	bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
1760 	const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
1761 	int manager_count;
1762 	bool framedonetv_irq = true;
1763 	u32 val, irq_mask = 0;
1764 
1765 	switch (sysc_soc->soc) {
1766 	case SOC_2420 ... SOC_3630:
1767 		manager_count = 2;
1768 		framedonetv_irq = false;
1769 		break;
1770 	case SOC_4430 ... SOC_4470:
1771 		manager_count = 3;
1772 		break;
1773 	case SOC_5430:
1774 	case SOC_DRA7:
1775 		manager_count = 4;
1776 		break;
1777 	case SOC_AM4:
1778 		manager_count = 1;
1779 		framedonetv_irq = false;
1780 		break;
1781 	case SOC_UNKNOWN:
1782 	default:
1783 		return 0;
1784 	}
1785 
1786 	/* Remap the whole module range to be able to reset dispc outputs */
1787 	devm_iounmap(ddata->dev, ddata->module_va);
1788 	ddata->module_va = devm_ioremap(ddata->dev,
1789 					ddata->module_pa,
1790 					ddata->module_size);
1791 	if (!ddata->module_va)
1792 		return -EIO;
1793 
1794 	/* DISP_CONTROL, shut down lcd and digit on disable if enabled */
1795 	val = sysc_read(ddata, dispc_offset + 0x40);
1796 	lcd_en = val & lcd_en_mask;
1797 	digit_en = val & digit_en_mask;
1798 	if (lcd_en)
1799 		irq_mask |= BIT(0);			/* FRAMEDONE */
1800 	if (digit_en) {
1801 		if (framedonetv_irq)
1802 			irq_mask |= BIT(24);		/* FRAMEDONETV */
1803 		else
1804 			irq_mask |= BIT(2) | BIT(3);	/* EVSYNC bits */
1805 	}
1806 	if (disable && (lcd_en || digit_en))
1807 		sysc_write(ddata, dispc_offset + 0x40,
1808 			   val & ~(lcd_en_mask | digit_en_mask));
1809 
1810 	if (manager_count <= 2)
1811 		return irq_mask;
1812 
1813 	/* DISPC_CONTROL2 */
1814 	val = sysc_read(ddata, dispc_offset + 0x238);
1815 	lcd2_en = val & lcd_en_mask;
1816 	if (lcd2_en)
1817 		irq_mask |= BIT(22);			/* FRAMEDONE2 */
1818 	if (disable && lcd2_en)
1819 		sysc_write(ddata, dispc_offset + 0x238,
1820 			   val & ~lcd_en_mask);
1821 
1822 	if (manager_count <= 3)
1823 		return irq_mask;
1824 
1825 	/* DISPC_CONTROL3 */
1826 	val = sysc_read(ddata, dispc_offset + 0x848);
1827 	lcd3_en = val & lcd_en_mask;
1828 	if (lcd3_en)
1829 		irq_mask |= BIT(30);			/* FRAMEDONE3 */
1830 	if (disable && lcd3_en)
1831 		sysc_write(ddata, dispc_offset + 0x848,
1832 			   val & ~lcd_en_mask);
1833 
1834 	return irq_mask;
1835 }
1836 
1837 /* DSS needs child outputs disabled and SDI registers cleared for reset */
1838 static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
1839 {
1840 	const int dispc_offset = 0x1000;
1841 	int error;
1842 	u32 irq_mask, val;
1843 
1844 	/* Get enabled outputs */
1845 	irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
1846 	if (!irq_mask)
1847 		return;
1848 
1849 	/* Clear IRQSTATUS */
1850 	sysc_write(ddata, dispc_offset + 0x18, irq_mask);
1851 
1852 	/* Disable outputs */
1853 	val = sysc_quirk_dispc(ddata, dispc_offset, true);
1854 
1855 	/* Poll IRQSTATUS */
1856 	error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
1857 				   val, val != irq_mask, 100, 50);
1858 	if (error)
1859 		dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
1860 			 __func__, val, irq_mask);
1861 
1862 	if (sysc_soc->soc == SOC_3430) {
1863 		/* Clear DSS_SDI_CONTROL */
1864 		sysc_write(ddata, 0x44, 0);
1865 
1866 		/* Clear DSS_PLL_CONTROL */
1867 		sysc_write(ddata, 0x48, 0);
1868 	}
1869 
1870 	/* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not */
1871 	sysc_write(ddata, 0x40, 0);
1872 }
1873 
/* 1-wire needs module's internal clocks enabled for reset */
static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
{
	int offset = 0x0c;	/* HDQ_CTRL_STATUS */
	u16 val;

	/* Set BIT(5) to keep the module's internal clocks running */
	val = sysc_read(ddata, offset);
	val |= BIT(5);
	sysc_write(ddata, offset, val);
}
1884 
/* AESS (Audio Engine SubSystem) needs autogating set after enable */
static void sysc_module_enable_quirk_aess(struct sysc *ddata)
{
	const int aess_auto_gating_enable = 0x7c;

	/* Enable autogating so the AESS can idle */
	sysc_write(ddata, aess_auto_gating_enable, 1);
}
1892 
1893 /* I2C needs to be disabled for reset */
1894 static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
1895 {
1896 	int offset;
1897 	u16 val;
1898 
1899 	/* I2C_CON, omap2/3 is different from omap4 and later */
1900 	if ((ddata->revision & 0xffffff00) == 0x001f0000)
1901 		offset = 0x24;
1902 	else
1903 		offset = 0xa4;
1904 
1905 	/* I2C_EN */
1906 	val = sysc_read(ddata, offset);
1907 	if (enable)
1908 		val |= BIT(15);
1909 	else
1910 		val &= ~BIT(15);
1911 	sysc_write(ddata, offset, val);
1912 }
1913 
/* Disable I2C before reset, see sysc_clk_quirk_i2c() */
static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
{
	sysc_clk_quirk_i2c(ddata, false);
}
1918 
/* Re-enable I2C after reset, see sysc_clk_quirk_i2c() */
static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
{
	sysc_clk_quirk_i2c(ddata, true);
}
1923 
/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */
static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
{
	u32 val, kick0_val = 0, kick1_val = 0;
	unsigned long flags;
	int error;

	/* Writing the magic kick values unlocks, writing zeros locks */
	if (!lock) {
		kick0_val = 0x83e70b13;
		kick1_val = 0x95a4f1e0;
	}

	/* Keep interrupts off so the writes fit in the RTC non-busy window */
	local_irq_save(flags);
	/* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
	error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
					  !(val & BIT(0)), 100, 50);
	if (error)
		dev_warn(ddata->dev, "rtc busy timeout\n");
	/* Now we have ~15 microseconds to read/write various registers */
	sysc_write(ddata, 0x6c, kick0_val);	/* KICK0 register */
	sysc_write(ddata, 0x70, kick1_val);	/* KICK1 register */
	local_irq_restore(flags);
}
1947 
/* Unlock the RTC so its sysconfig register can be written */
static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
{
	sysc_quirk_rtc(ddata, false);
}
1952 
/* Lock the RTC again after sysconfig has been written */
static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
{
	sysc_quirk_rtc(ddata, true);
}
1957 
/* OTG omap2430 glue layer up to omap4 needs OTG_FORCESTDBY configured */
static void sysc_module_enable_quirk_otg(struct sysc *ddata)
{
	const int otg_forcestdby = 0x414;

	/* Clear ENABLEFORCE so the module is not forced to standby */
	sysc_write(ddata, otg_forcestdby, 0);
}
1965 
/* Force the OTG glue layer to standby on disable */
static void sysc_module_disable_quirk_otg(struct sysc *ddata)
{
	const int otg_forcestdby = 0x414;

	/* Set ENABLEFORCE to force standby */
	sysc_write(ddata, otg_forcestdby, BIT(0));
}
1973 
/* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */
static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
{
	const int ocp_debug_config = 0xff08;

	/* Set THALIA_INT_BYPASS */
	sysc_write(ddata, ocp_debug_config, BIT(31));
}
1982 
1983 /* Watchdog timer needs a disable sequence after reset */
1984 static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
1985 {
1986 	int wps, spr, error;
1987 	u32 val;
1988 
1989 	wps = 0x34;
1990 	spr = 0x48;
1991 
1992 	sysc_write(ddata, spr, 0xaaaa);
1993 	error = readl_poll_timeout(ddata->module_va + wps, val,
1994 				   !(val & 0x10), 100,
1995 				   MAX_MODULE_SOFTRESET_WAIT);
1996 	if (error)
1997 		dev_warn(ddata->dev, "wdt disable step1 failed\n");
1998 
1999 	sysc_write(ddata, spr, 0x5555);
2000 	error = readl_poll_timeout(ddata->module_va + wps, val,
2001 				   !(val & 0x10), 100,
2002 				   MAX_MODULE_SOFTRESET_WAIT);
2003 	if (error)
2004 		dev_warn(ddata->dev, "wdt disable step2 failed\n");
2005 }
2006 
2007 /* PRUSS needs to set MSTANDBY_INIT inorder to idle properly */
2008 static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
2009 {
2010 	u32 reg;
2011 
2012 	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
2013 	reg |= SYSC_PRUSS_STANDBY_INIT;
2014 	sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
2015 }
2016 
/*
 * Hook up module specific quirk handlers based on the detected quirk
 * flags. Only applies to modules probed in non-legacy mode with a
 * detected name. Note that the early returns make the quirks handled
 * first mutually exclusive with the quirks checked after them.
 */
static void sysc_init_module_quirks(struct sysc *ddata)
{
	if (ddata->legacy_mode || !ddata->name)
		return;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;

		return;
	}

#ifdef CONFIG_OMAP_GPMC_DEBUG
	if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
		/* Skip the init time reset when GPMC debug is enabled */
		ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;

		return;
	}
#endif

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
		ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;

		return;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
		ddata->module_enable_quirk = sysc_module_enable_quirk_aess;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
		ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
		ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;

		return;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_OTG) {
		ddata->module_enable_quirk = sysc_module_enable_quirk_otg;
		ddata->module_disable_quirk = sysc_module_disable_quirk_otg;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
		ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
		/* The wdt disable sequence is also needed on module disable */
		ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
		ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
		ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
}
2072 
2073 static int sysc_clockdomain_init(struct sysc *ddata)
2074 {
2075 	struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
2076 	struct clk *fck = NULL, *ick = NULL;
2077 	int error;
2078 
2079 	if (!pdata || !pdata->init_clockdomain)
2080 		return 0;
2081 
2082 	switch (ddata->nr_clocks) {
2083 	case 2:
2084 		ick = ddata->clocks[SYSC_ICK];
2085 		fallthrough;
2086 	case 1:
2087 		fck = ddata->clocks[SYSC_FCK];
2088 		break;
2089 	case 0:
2090 		return 0;
2091 	}
2092 
2093 	error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie);
2094 	if (!error || error == -ENODEV)
2095 		return 0;
2096 
2097 	return error;
2098 }
2099 
2100 /*
2101  * Note that pdata->init_module() typically does a reset first. After
2102  * pdata->init_module() is done, PM runtime can be used for the interconnect
2103  * target module.
2104  */
2105 static int sysc_legacy_init(struct sysc *ddata)
2106 {
2107 	struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
2108 	int error;
2109 
2110 	if (!pdata || !pdata->init_module)
2111 		return 0;
2112 
2113 	error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
2114 	if (error == -EEXIST)
2115 		error = 0;
2116 
2117 	return error;
2118 }
2119 
2120 /*
2121  * Note that the caller must ensure the interconnect target module is enabled
2122  * before calling reset. Otherwise reset will not complete.
2123  */
static int sysc_reset(struct sysc *ddata)
{
	int sysc_offset, sysc_val, error;
	u32 sysc_mask;

	sysc_offset = ddata->offsets[SYSC_SYSCONFIG];

	/*
	 * Nothing to do if reset is handled by legacy platform code, the
	 * module has no SOFTRESET bit, or reset is disabled by a quirk.
	 */
	if (ddata->legacy_mode ||
	    ddata->cap->regbits->srst_shift < 0 ||
	    ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
		return 0;

	sysc_mask = BIT(ddata->cap->regbits->srst_shift);

	if (ddata->pre_reset_quirk)
		ddata->pre_reset_quirk(ddata);

	/* Assert SOFTRESET if the module has a SYSCONFIG register */
	if (sysc_offset >= 0) {
		sysc_val = sysc_read_sysconfig(ddata);
		sysc_val |= sysc_mask;
		sysc_write(ddata, sysc_offset, sysc_val);
	}

	/* Optional dts-configured delay after asserting reset */
	if (ddata->cfg.srst_udelay)
		usleep_range(ddata->cfg.srst_udelay,
			     ddata->cfg.srst_udelay * 2);

	if (ddata->post_reset_quirk)
		ddata->post_reset_quirk(ddata);

	error = sysc_wait_softreset(ddata);
	if (error)
		dev_warn(ddata->dev, "OCP softreset timed out\n");

	if (ddata->reset_done_quirk)
		ddata->reset_done_quirk(ddata);

	return error;
}
2163 
2164 /*
2165  * At this point the module is configured enough to read the revision but
2166  * module may not be completely configured yet to use PM runtime. Enable
2167  * all clocks directly during init to configure the quirks needed for PM
2168  * runtime based on the revision register.
2169  */
static int sysc_init_module(struct sysc *ddata)
{
	bool rstctrl_deasserted = false;
	int error = 0;

	error = sysc_clockdomain_init(ddata);
	if (error)
		return error;

	/* Keep the clockdomain active while poking at the module */
	sysc_clkdm_deny_idle(ddata);

	/*
	 * Always enable clocks. The bootloader may or may not have enabled
	 * the related clocks.
	 */
	error = sysc_enable_opt_clocks(ddata);
	if (error)
		return error;

	error = sysc_enable_main_clocks(ddata);
	if (error)
		goto err_opt_clocks;

	/* Deassert the rstctrl reset line unless a quirk forbids reset */
	if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
		error = reset_control_deassert(ddata->rsts);
		if (error)
			goto err_main_clocks;
		rstctrl_deasserted = true;
	}

	/* The revision register drives the remaining quirk configuration */
	ddata->revision = sysc_read_revision(ddata);
	sysc_init_revision_quirks(ddata);
	sysc_init_module_quirks(ddata);

	if (ddata->legacy_mode) {
		error = sysc_legacy_init(ddata);
		if (error)
			goto err_main_clocks;
	}

	if (!ddata->legacy_mode) {
		error = sysc_enable_module(ddata->dev);
		if (error)
			goto err_main_clocks;
	}

	error = sysc_reset(ddata);
	if (error)
		dev_err(ddata->dev, "Reset failed with %d\n", error);

	if (error && !ddata->legacy_mode)
		sysc_disable_module(ddata->dev);

err_main_clocks:
	if (error)
		sysc_disable_main_clocks(ddata);
err_opt_clocks:
	/* No re-enable of clockdomain autoidle to prevent module autoidle */
	if (error) {
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	if (error && rstctrl_deasserted &&
	    !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
		reset_control_assert(ddata->rsts);

	return error;
}
2239 
2240 static int sysc_init_sysc_mask(struct sysc *ddata)
2241 {
2242 	struct device_node *np = ddata->dev->of_node;
2243 	int error;
2244 	u32 val;
2245 
2246 	error = of_property_read_u32(np, "ti,sysc-mask", &val);
2247 	if (error)
2248 		return 0;
2249 
2250 	ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
2251 
2252 	return 0;
2253 }
2254 
static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
			      const char *name)
{
	struct device_node *np = ddata->dev->of_node;
	struct property *prop;
	const __be32 *p;
	u32 val;

	/* Build a bitmask of the idlemodes listed for property @name */
	of_property_for_each_u32(np, name, prop, p, val) {
		if (val >= SYSC_NR_IDLEMODES) {
			dev_err(ddata->dev, "invalid idlemode: %i\n", val);
			return -EINVAL;
		}
		*idlemodes |=  (1 << val);
	}

	return 0;
}
2273 
2274 static int sysc_init_idlemodes(struct sysc *ddata)
2275 {
2276 	int error;
2277 
2278 	error = sysc_init_idlemode(ddata, &ddata->cfg.midlemodes,
2279 				   "ti,sysc-midle");
2280 	if (error)
2281 		return error;
2282 
2283 	error = sysc_init_idlemode(ddata, &ddata->cfg.sidlemodes,
2284 				   "ti,sysc-sidle");
2285 	if (error)
2286 		return error;
2287 
2288 	return 0;
2289 }
2290 
2291 /*
2292  * Only some devices on omap4 and later have SYSCONFIG reset done
2293  * bit. We can detect this if there is no SYSSTATUS at all, or the
 * SYSSTATUS bit 0 is not used. Note that some SYSSTATUS registers
2295  * have multiple bits for the child devices like OHCI and EHCI.
2296  * Depends on SYSC being parsed first.
2297  */
2298 static int sysc_init_syss_mask(struct sysc *ddata)
2299 {
2300 	struct device_node *np = ddata->dev->of_node;
2301 	int error;
2302 	u32 val;
2303 
2304 	error = of_property_read_u32(np, "ti,syss-mask", &val);
2305 	if (error) {
2306 		if ((ddata->cap->type == TI_SYSC_OMAP4 ||
2307 		     ddata->cap->type == TI_SYSC_OMAP4_TIMER) &&
2308 		    (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
2309 			ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
2310 
2311 		return 0;
2312 	}
2313 
2314 	if (!(val & 1) && (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
2315 		ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
2316 
2317 	ddata->cfg.syss_mask = val;
2318 
2319 	return 0;
2320 }
2321 
2322 /*
2323  * Many child device drivers need to have fck and opt clocks available
2324  * to get the clock rate for device internal configuration etc.
2325  */
static int sysc_child_add_named_clock(struct sysc *ddata,
				      struct device *child,
				      const char *name)
{
	struct clk *clk;
	struct clk_lookup *l;
	int error = 0;

	if (!name)
		return 0;

	/* Bail out with -EEXIST if the child can already find this clock */
	clk = clk_get(child, name);
	if (!IS_ERR(clk)) {
		error = -EEXIST;
		goto put_clk;
	}

	/* Look the clock up on the interconnect target module instead */
	clk = clk_get(ddata->dev, name);
	if (IS_ERR(clk))
		return -ENODEV;

	/* Register a lookup so the child can clk_get() the clock by name */
	l = clkdev_create(clk, name, dev_name(child));
	if (!l)
		error = -ENOMEM;
put_clk:
	clk_put(clk);

	return error;
}
2355 
2356 static int sysc_child_add_clocks(struct sysc *ddata,
2357 				 struct device *child)
2358 {
2359 	int i, error;
2360 
2361 	for (i = 0; i < ddata->nr_clocks; i++) {
2362 		error = sysc_child_add_named_clock(ddata,
2363 						   child,
2364 						   ddata->clock_roles[i]);
2365 		if (error && error != -EEXIST) {
2366 			dev_err(ddata->dev, "could not add child clock %s: %i\n",
2367 				ddata->clock_roles[i], error);
2368 
2369 			return error;
2370 		}
2371 	}
2372 
2373 	return 0;
2374 }
2375 
/* Marker type so children can find their ti-sysc parent device */
static struct device_type sysc_device_type = {
};
2378 
2379 static struct sysc *sysc_child_to_parent(struct device *dev)
2380 {
2381 	struct device *parent = dev->parent;
2382 
2383 	if (!parent || parent->type != &sysc_device_type)
2384 		return NULL;
2385 
2386 	return dev_get_drvdata(parent);
2387 }
2388 
2389 static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
2390 {
2391 	struct sysc *ddata;
2392 	int error;
2393 
2394 	ddata = sysc_child_to_parent(dev);
2395 
2396 	error = pm_generic_runtime_suspend(dev);
2397 	if (error)
2398 		return error;
2399 
2400 	if (!ddata->enabled)
2401 		return 0;
2402 
2403 	return sysc_runtime_suspend(ddata->dev);
2404 }
2405 
2406 static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
2407 {
2408 	struct sysc *ddata;
2409 	int error;
2410 
2411 	ddata = sysc_child_to_parent(dev);
2412 
2413 	if (!ddata->enabled) {
2414 		error = sysc_runtime_resume(ddata->dev);
2415 		if (error < 0)
2416 			dev_err(ddata->dev,
2417 				"%s error: %i\n", __func__, error);
2418 	}
2419 
2420 	return pm_generic_runtime_resume(dev);
2421 }
2422 
2423 #ifdef CONFIG_PM_SLEEP
static int sysc_child_suspend_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	error = pm_generic_suspend_noirq(dev);
	if (error) {
		dev_err(dev, "%s error at %i: %i\n",
			__func__, __LINE__, error);

		return error;
	}

	if (!pm_runtime_status_suspended(dev)) {
		/* Child still active: runtime suspend it for system suspend */
		error = pm_generic_runtime_suspend(dev);
		if (error) {
			/* Device is busy, leave it as is over suspend */
			dev_dbg(dev, "%s busy at %i: %i\n",
				__func__, __LINE__, error);

			return 0;
		}

		/* And idle the interconnect target module itself */
		error = sysc_runtime_suspend(ddata->dev);
		if (error) {
			dev_err(dev, "%s error at %i: %i\n",
				__func__, __LINE__, error);

			return error;
		}

		/* Tell resume_noirq to undo the forced runtime suspend */
		ddata->child_needs_resume = true;
	}

	return 0;
}
2464 
static int sysc_child_resume_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	/* Undo the runtime suspend forced by sysc_child_suspend_noirq() */
	if (ddata->child_needs_resume) {
		ddata->child_needs_resume = false;

		/* Resume the interconnect target module first */
		error = sysc_runtime_resume(ddata->dev);
		if (error)
			dev_err(ddata->dev,
				"%s runtime resume error: %i\n",
				__func__, error);

		error = pm_generic_runtime_resume(dev);
		if (error)
			dev_err(ddata->dev,
				"%s generic runtime resume: %i\n",
				__func__, error);
	}

	return pm_generic_resume_noirq(dev);
}
2493 #endif
2494 
/*
 * PM domain installed on legacy-idle children so their runtime PM and
 * noirq system suspend/resume also manage the parent target module.
 */
static struct dev_pm_domain sysc_child_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
				   sysc_child_runtime_resume,
				   NULL)
		USE_PLATFORM_PM_SLEEP_OPS
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
					      sysc_child_resume_noirq)
	}
};
2505 
2506 /* Caller needs to take list_lock if ever used outside of cpu_pm */
2507 static void sysc_reinit_modules(struct sysc_soc_info *soc)
2508 {
2509 	struct sysc_module *module;
2510 	struct sysc *ddata;
2511 
2512 	list_for_each_entry(module, &sysc_soc->restored_modules, node) {
2513 		ddata = module->ddata;
2514 		sysc_reinit_module(ddata, ddata->enabled);
2515 	}
2516 }
2517 
2518 /**
2519  * sysc_context_notifier - optionally reset and restore module after idle
2520  * @nb: notifier block
2521  * @cmd: unused
2522  * @v: unused
2523  *
2524  * Some interconnect target modules need to be restored, or reset and restored
2525  * on CPU_PM CPU_PM_CLUSTER_EXIT notifier. This is needed at least for am335x
2526  * OTG and GPMC target modules even if the modules are unused.
2527  */
2528 static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
2529 				 void *v)
2530 {
2531 	struct sysc_soc_info *soc;
2532 
2533 	soc = container_of(nb, struct sysc_soc_info, nb);
2534 
2535 	switch (cmd) {
2536 	case CPU_CLUSTER_PM_ENTER:
2537 		break;
2538 	case CPU_CLUSTER_PM_ENTER_FAILED:	/* No need to restore context */
2539 		break;
2540 	case CPU_CLUSTER_PM_EXIT:
2541 		sysc_reinit_modules(soc);
2542 		break;
2543 	}
2544 
2545 	return NOTIFY_OK;
2546 }
2547 
2548 /**
 * sysc_add_restored - optionally add reset and restore quirk handling
2550  * @ddata: device data
2551  */
2552 static void sysc_add_restored(struct sysc *ddata)
2553 {
2554 	struct sysc_module *restored_module;
2555 
2556 	restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
2557 	if (!restored_module)
2558 		return;
2559 
2560 	restored_module->ddata = ddata;
2561 
2562 	mutex_lock(&sysc_soc->list_lock);
2563 
2564 	list_add(&restored_module->node, &sysc_soc->restored_modules);
2565 
2566 	if (sysc_soc->nb.notifier_call)
2567 		goto out_unlock;
2568 
2569 	sysc_soc->nb.notifier_call = sysc_context_notifier;
2570 	cpu_pm_register_notifier(&sysc_soc->nb);
2571 
2572 out_unlock:
2573 	mutex_unlock(&sysc_soc->list_lock);
2574 }
2575 
2576 /**
2577  * sysc_legacy_idle_quirk - handle children in omap_device compatible way
2578  * @ddata: device driver data
2579  * @child: child device driver
2580  *
2581  * Allow idle for child devices as done with _od_runtime_suspend().
2582  * Otherwise many child devices will not idle because of the permanent
2583  * parent usecount set in pm_runtime_irq_safe().
2584  *
2585  * Note that the long term solution is to just modify the child device
2586  * drivers to not set pm_runtime_irq_safe() and then this can be just
2587  * dropped.
2588  */
2589 static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
2590 {
2591 	if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
2592 		dev_pm_domain_set(child, &sysc_child_pm_domain);
2593 }
2594 
2595 static int sysc_notifier_call(struct notifier_block *nb,
2596 			      unsigned long event, void *device)
2597 {
2598 	struct device *dev = device;
2599 	struct sysc *ddata;
2600 	int error;
2601 
2602 	ddata = sysc_child_to_parent(dev);
2603 	if (!ddata)
2604 		return NOTIFY_DONE;
2605 
2606 	switch (event) {
2607 	case BUS_NOTIFY_ADD_DEVICE:
2608 		error = sysc_child_add_clocks(ddata, dev);
2609 		if (error)
2610 			return error;
2611 		sysc_legacy_idle_quirk(ddata, dev);
2612 		break;
2613 	default:
2614 		break;
2615 	}
2616 
2617 	return NOTIFY_DONE;
2618 }
2619 
/* Bus notifier for setting up child device clocks and PM at device add */
static struct notifier_block sysc_nb = {
	.notifier_call = sysc_notifier_call,
};
2623 
2624 /* Device tree configured quirks */
struct sysc_dts_quirk {
	const char *name;	/* dts property name */
	u32 mask;		/* SYSC_QUIRK_* flag to set when present */
};

/* Mapping from dts boolean properties to quirk flags */
static const struct sysc_dts_quirk sysc_dts_quirks[] = {
	{ .name = "ti,no-idle-on-init",
	  .mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
	{ .name = "ti,no-reset-on-init",
	  .mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
	{ .name = "ti,no-idle",
	  .mask = SYSC_QUIRK_NO_IDLE, },
};
2638 
2639 static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
2640 				  bool is_child)
2641 {
2642 	const struct property *prop;
2643 	int i, len;
2644 
2645 	for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
2646 		const char *name = sysc_dts_quirks[i].name;
2647 
2648 		prop = of_get_property(np, name, &len);
2649 		if (!prop)
2650 			continue;
2651 
2652 		ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
2653 		if (is_child) {
2654 			dev_warn(ddata->dev,
2655 				 "dts flag should be at module level for %s\n",
2656 				 name);
2657 		}
2658 	}
2659 }
2660 
2661 static int sysc_init_dts_quirks(struct sysc *ddata)
2662 {
2663 	struct device_node *np = ddata->dev->of_node;
2664 	int error;
2665 	u32 val;
2666 
2667 	ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);
2668 
2669 	sysc_parse_dts_quirks(ddata, np, false);
2670 	error = of_property_read_u32(np, "ti,sysc-delay-us", &val);
2671 	if (!error) {
2672 		if (val > 255) {
2673 			dev_warn(ddata->dev, "bad ti,sysc-delay-us: %i\n",
2674 				 val);
2675 		}
2676 
2677 		ddata->cfg.srst_udelay = (u8)val;
2678 	}
2679 
2680 	return 0;
2681 }
2682 
2683 static void sysc_unprepare(struct sysc *ddata)
2684 {
2685 	int i;
2686 
2687 	if (!ddata->clocks)
2688 		return;
2689 
2690 	for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
2691 		if (!IS_ERR_OR_NULL(ddata->clocks[i]))
2692 			clk_unprepare(ddata->clocks[i]);
2693 	}
2694 }
2695 
2696 /*
2697  * Common sysc register bits found on omap2, also known as type1
2698  */
static const struct sysc_regbits sysc_regbits_omap2 = {
	.dmadisable_shift = -ENODEV,	/* -ENODEV: bit not present */
	.midle_shift = 12,
	.sidle_shift = 3,
	.clkact_shift = 8,
	.emufree_shift = 5,
	.enwkup_shift = 2,
	.srst_shift = 1,
	.autoidle_shift = 0,
};

static const struct sysc_capabilities sysc_omap2 = {
	.type = TI_SYSC_OMAP2,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
};

/* All omap2 and 3 timers, and timers 1, 2 & 10 on omap 4 and 5 */
static const struct sysc_capabilities sysc_omap2_timer = {
	.type = TI_SYSC_OMAP2_TIMER,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT,
};

/*
 * SHAM2 (SHA1/MD5) sysc found on omap3, a variant of sysc_regbits_omap2
 * with different sidle position
 */
static const struct sysc_regbits sysc_regbits_omap3_sham = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 4,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap3_sham = {
	.type = TI_SYSC_OMAP3_SHAM,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_sham,
};

/*
 * AES register bits found on omap3 and later, a variant of
 * sysc_regbits_omap2 with different sidle position
 */
static const struct sysc_regbits sysc_regbits_omap3_aes = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 6,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap3_aes = {
	.type = TI_SYSC_OMAP3_AES,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_aes,
};
2769 
2770 /*
2771  * Common sysc register bits found on omap4, also known as type2
2772  */
static const struct sysc_regbits sysc_regbits_omap4 = {
	.dmadisable_shift = 16,
	.midle_shift = 4,
	.sidle_shift = 2,
	.clkact_shift = -ENODEV,	/* -ENODEV: bit not present */
	.enwkup_shift = -ENODEV,
	.emufree_shift = 1,
	.srst_shift = 0,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4 = {
	.type = TI_SYSC_OMAP4,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};

static const struct sysc_capabilities sysc_omap4_timer = {
	.type = TI_SYSC_OMAP4_TIMER,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};

/*
 * Common sysc register bits found on omap4, also known as type3
 */
static const struct sysc_regbits sysc_regbits_omap4_simple = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = 2,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_simple = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
};

/*
 * SmartReflex sysc found on omap34xx
 */
static const struct sysc_regbits sysc_regbits_omap34xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = 20,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_34xx_sr = {
	.type = TI_SYSC_OMAP34XX_SR,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
	.regbits = &sysc_regbits_omap34xx_sr,
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
		      SYSC_QUIRK_LEGACY_IDLE,
};

/*
 * SmartReflex sysc found on omap36xx and later
 */
static const struct sysc_regbits sysc_regbits_omap36xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_36xx_sr = {
	.type = TI_SYSC_OMAP36XX_SR,
	.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
};

/* SmartReflex on omap4 and later shares the omap36xx register layout */
static const struct sysc_capabilities sysc_omap4_sr = {
	.type = TI_SYSC_OMAP4_SR,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};
2865 
2866 /*
2867  * McASP register bits found on omap4 and later
2868  */
static const struct sysc_regbits sysc_regbits_omap4_mcasp = {
	.dmadisable_shift = -ENODEV,	/* -ENODEV: bit not present */
	.midle_shift = -ENODEV,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_mcasp = {
	.type = TI_SYSC_OMAP4_MCASP,
	.regbits = &sysc_regbits_omap4_mcasp,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};

/*
 * McASP found on dra7 and later
 */
static const struct sysc_capabilities sysc_dra7_mcasp = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};

/*
 * FS USB host found on omap4 and later
 */
static const struct sysc_regbits sysc_regbits_omap4_usb_host_fs = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
	.type = TI_SYSC_OMAP4_USB_HOST_FS,
	.sysc_mask = SYSC_OMAP2_ENAWAKEUP,
	.regbits = &sysc_regbits_omap4_usb_host_fs,
};

/* MCAN found on dra7 with only enawakeup and softreset bits */
static const struct sysc_regbits sysc_regbits_dra7_mcan = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 4,
	.srst_shift = 0,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_dra7_mcan = {
	.type = TI_SYSC_DRA7_MCAN,
	.sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_dra7_mcan,
	.mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};

/*
 * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
 */
static const struct sysc_capabilities sysc_pruss = {
	.type = TI_SYSC_PRUSS,
	.sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
};
2942 
2943 static int sysc_init_pdata(struct sysc *ddata)
2944 {
2945 	struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
2946 	struct ti_sysc_module_data *mdata;
2947 
2948 	if (!pdata)
2949 		return 0;
2950 
2951 	mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
2952 	if (!mdata)
2953 		return -ENOMEM;
2954 
2955 	if (ddata->legacy_mode) {
2956 		mdata->name = ddata->legacy_mode;
2957 		mdata->module_pa = ddata->module_pa;
2958 		mdata->module_size = ddata->module_size;
2959 		mdata->offsets = ddata->offsets;
2960 		mdata->nr_offsets = SYSC_MAX_REGS;
2961 		mdata->cap = ddata->cap;
2962 		mdata->cfg = &ddata->cfg;
2963 	}
2964 
2965 	ddata->mdata = mdata;
2966 
2967 	return 0;
2968 }
2969 
2970 static int sysc_init_match(struct sysc *ddata)
2971 {
2972 	const struct sysc_capabilities *cap;
2973 
2974 	cap = of_device_get_match_data(ddata->dev);
2975 	if (!cap)
2976 		return -EINVAL;
2977 
2978 	ddata->cap = cap;
2979 	if (ddata->cap)
2980 		ddata->cfg.quirks |= ddata->cap->mod_quirks;
2981 
2982 	return 0;
2983 }
2984 
/* Delayed work to drop the extra clock and PM usage counts held over init */
static void ti_sysc_idle(struct work_struct *work)
{
	struct sysc *ddata;

	ddata = container_of(work, struct sysc, idle_work.work);

	/*
	 * One time decrement of clock usage counts if left on from init.
	 * Note that we disable opt clocks unconditionally in this case
	 * as they are enabled unconditionally during init without
	 * considering sysc_opt_clks_needed() at that point.
	 */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT)) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	/* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
	if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
		return;

	/*
	 * Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
	 * and SYSC_QUIRK_NO_RESET_ON_INIT
	 */
	if (pm_runtime_active(ddata->dev))
		pm_runtime_put_sync(ddata->dev);
}
3015 
3016 /*
3017  * SoC model and features detection. Only needed for SoCs that need
3018  * special handling for quirks, no need to list others.
3019  */
/* Machine glob patterns mapped to enum sysc_soc values */
static const struct soc_device_attribute sysc_soc_match[] = {
	SOC_FLAG("OMAP242*", SOC_2420),
	SOC_FLAG("OMAP243*", SOC_2430),
	SOC_FLAG("OMAP3[45]*", SOC_3430),
	SOC_FLAG("OMAP3[67]*", SOC_3630),
	SOC_FLAG("OMAP443*", SOC_4430),
	SOC_FLAG("OMAP446*", SOC_4460),
	SOC_FLAG("OMAP447*", SOC_4470),
	SOC_FLAG("OMAP54*", SOC_5430),
	SOC_FLAG("AM433", SOC_AM3),
	SOC_FLAG("AM43*", SOC_AM4),
	SOC_FLAG("DRA7*", SOC_DRA7),

	{ /* sentinel */ }
};
3035 
3036 /*
3037  * List of SoCs variants with disabled features. By default we assume all
3038  * devices in the device tree are available so no need to list those SoCs.
3039  */
/* SoC variants mapped to DIS_* bitmasks of unavailable accelerators */
static const struct soc_device_attribute sysc_soc_feat_match[] = {
	/* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
	SOC_FLAG("AM3505", DIS_SGX),
	SOC_FLAG("OMAP3525", DIS_SGX),
	SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),

	/* OMAP3630/DM3730 variants with some accelerators disabled */
	SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
	SOC_FLAG("DM3725", DIS_SGX),
	SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
	SOC_FLAG("OMAP3621", DIS_ISP),

	{ /* sentinel */ }
};
3056 
3057 static int sysc_add_disabled(unsigned long base)
3058 {
3059 	struct sysc_address *disabled_module;
3060 
3061 	disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
3062 	if (!disabled_module)
3063 		return -ENOMEM;
3064 
3065 	disabled_module->base = base;
3066 
3067 	mutex_lock(&sysc_soc->list_lock);
3068 	list_add(&disabled_module->node, &sysc_soc->disabled_modules);
3069 	mutex_unlock(&sysc_soc->list_lock);
3070 
3071 	return 0;
3072 }
3073 
3074 /*
3075  * One time init to detect the booted SoC, disable unavailable features
3076  * and initialize list for optional cpu_pm notifier.
3077  *
3078  * Note that we initialize static data shared across all ti-sysc instances
3079  * so ddata is only used for SoC type. This can be called from module_init
3080  * once we no longer need to rely on platform data.
3081  */
3082 static int sysc_init_static_data(struct sysc *ddata)
3083 {
3084 	const struct soc_device_attribute *match;
3085 	struct ti_sysc_platform_data *pdata;
3086 	unsigned long features = 0;
3087 	struct device_node *np;
3088 
3089 	if (sysc_soc)
3090 		return 0;
3091 
3092 	sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
3093 	if (!sysc_soc)
3094 		return -ENOMEM;
3095 
3096 	mutex_init(&sysc_soc->list_lock);
3097 	INIT_LIST_HEAD(&sysc_soc->disabled_modules);
3098 	INIT_LIST_HEAD(&sysc_soc->restored_modules);
3099 	sysc_soc->general_purpose = true;
3100 
3101 	pdata = dev_get_platdata(ddata->dev);
3102 	if (pdata && pdata->soc_type_gp)
3103 		sysc_soc->general_purpose = pdata->soc_type_gp();
3104 
3105 	match = soc_device_match(sysc_soc_match);
3106 	if (match && match->data)
3107 		sysc_soc->soc = (int)match->data;
3108 
3109 	/*
3110 	 * Check and warn about possible old incomplete dtb. We now want to see
3111 	 * simple-pm-bus instead of simple-bus in the dtb for genpd using SoCs.
3112 	 */
3113 	switch (sysc_soc->soc) {
3114 	case SOC_AM3:
3115 	case SOC_AM4:
3116 	case SOC_4430 ... SOC_4470:
3117 	case SOC_5430:
3118 	case SOC_DRA7:
3119 		np = of_find_node_by_path("/ocp");
3120 		WARN_ONCE(np && of_device_is_compatible(np, "simple-bus"),
3121 			  "ti-sysc: Incomplete old dtb, please update\n");
3122 		break;
3123 	default:
3124 		break;
3125 	}
3126 
3127 	/* Ignore devices that are not available on HS and EMU SoCs */
3128 	if (!sysc_soc->general_purpose) {
3129 		switch (sysc_soc->soc) {
3130 		case SOC_3430 ... SOC_3630:
3131 			sysc_add_disabled(0x48304000);	/* timer12 */
3132 			break;
3133 		case SOC_AM3:
3134 			sysc_add_disabled(0x48310000);  /* rng */
3135 			break;
3136 		default:
3137 			break;
3138 		}
3139 	}
3140 
3141 	match = soc_device_match(sysc_soc_feat_match);
3142 	if (!match)
3143 		return 0;
3144 
3145 	if (match->data)
3146 		features = (unsigned long)match->data;
3147 
3148 	/*
3149 	 * Add disabled devices to the list based on the module base.
3150 	 * Note that this must be done before we attempt to access the
3151 	 * device and have module revision checks working.
3152 	 */
3153 	if (features & DIS_ISP)
3154 		sysc_add_disabled(0x480bd400);
3155 	if (features & DIS_IVA)
3156 		sysc_add_disabled(0x5d000000);
3157 	if (features & DIS_SGX)
3158 		sysc_add_disabled(0x50000000);
3159 
3160 	return 0;
3161 }
3162 
3163 static void sysc_cleanup_static_data(void)
3164 {
3165 	struct sysc_module *restored_module;
3166 	struct sysc_address *disabled_module;
3167 	struct list_head *pos, *tmp;
3168 
3169 	if (!sysc_soc)
3170 		return;
3171 
3172 	if (sysc_soc->nb.notifier_call)
3173 		cpu_pm_unregister_notifier(&sysc_soc->nb);
3174 
3175 	mutex_lock(&sysc_soc->list_lock);
3176 	list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
3177 		restored_module = list_entry(pos, struct sysc_module, node);
3178 		list_del(pos);
3179 		kfree(restored_module);
3180 	}
3181 	list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
3182 		disabled_module = list_entry(pos, struct sysc_address, node);
3183 		list_del(pos);
3184 		kfree(disabled_module);
3185 	}
3186 	mutex_unlock(&sysc_soc->list_lock);
3187 }
3188 
3189 static int sysc_check_disabled_devices(struct sysc *ddata)
3190 {
3191 	struct sysc_address *disabled_module;
3192 	int error = 0;
3193 
3194 	mutex_lock(&sysc_soc->list_lock);
3195 	list_for_each_entry(disabled_module, &sysc_soc->disabled_modules, node) {
3196 		if (ddata->module_pa == disabled_module->base) {
3197 			dev_dbg(ddata->dev, "module disabled for this SoC\n");
3198 			error = -ENODEV;
3199 			break;
3200 		}
3201 	}
3202 	mutex_unlock(&sysc_soc->list_lock);
3203 
3204 	return error;
3205 }
3206 
3207 /*
3208  * Ignore timers tagged with no-reset and no-idle. These are likely in use,
3209  * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
3210  * are needed, we could also look at the timer register configuration.
3211  */
3212 static int sysc_check_active_timer(struct sysc *ddata)
3213 {
3214 	int error;
3215 
3216 	if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
3217 	    ddata->cap->type != TI_SYSC_OMAP4_TIMER)
3218 		return 0;
3219 
3220 	/*
3221 	 * Quirk for omap3 beagleboard revision A to B4 to use gpt12.
3222 	 * Revision C and later are fixed with commit 23885389dbbb ("ARM:
3223 	 * dts: Fix timer regression for beagleboard revision c"). This all
3224 	 * can be dropped if we stop supporting old beagleboard revisions
3225 	 * A to B4 at some point.
3226 	 */
3227 	if (sysc_soc->soc == SOC_3430)
3228 		error = -ENXIO;
3229 	else
3230 		error = -EBUSY;
3231 
3232 	if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
3233 	    (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
3234 		return error;
3235 
3236 	return 0;
3237 }
3238 
/* Match table passed to of_platform_populate() for creating child devices */
static const struct of_device_id sysc_match_table[] = {
	{ .compatible = "simple-bus", },
	{ /* sentinel */ },
};
3243 
/**
 * sysc_probe - probe one sysc interconnect target module
 * @pdev: platform device for the interconnect target
 *
 * Parses the dts data and quirks, maps and validates the module registers,
 * sets up clocks and resets, enables the module via PM runtime, and then
 * populates the child device unless the module is reserved. Early init
 * steps before sysc_get_clocks() only use devm-managed resources, so they
 * can return directly; later failures must unwind via the goto labels.
 */
static int sysc_probe(struct platform_device *pdev)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct sysc *ddata;
	int error;

	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	/* Mark all register offsets as not found until detected */
	ddata->offsets[SYSC_REVISION] = -ENODEV;
	ddata->offsets[SYSC_SYSCONFIG] = -ENODEV;
	ddata->offsets[SYSC_SYSSTATUS] = -ENODEV;
	ddata->dev = &pdev->dev;
	platform_set_drvdata(pdev, ddata);

	error = sysc_init_static_data(ddata);
	if (error)
		return error;

	error = sysc_init_match(ddata);
	if (error)
		return error;

	error = sysc_init_dts_quirks(ddata);
	if (error)
		return error;

	error = sysc_map_and_check_registers(ddata);
	if (error)
		return error;

	error = sysc_init_sysc_mask(ddata);
	if (error)
		return error;

	error = sysc_init_idlemodes(ddata);
	if (error)
		return error;

	error = sysc_init_syss_mask(ddata);
	if (error)
		return error;

	error = sysc_init_pdata(ddata);
	if (error)
		return error;

	sysc_init_early_quirks(ddata);

	/* Must be checked before touching the module, see sysc_add_disabled() */
	error = sysc_check_disabled_devices(ddata);
	if (error)
		return error;

	/* -ENXIO marks the timer reserved but lets probe continue */
	error = sysc_check_active_timer(ddata);
	if (error == -ENXIO)
		ddata->reserved = true;
	else if (error)
		return error;

	error = sysc_get_clocks(ddata);
	if (error)
		return error;

	error = sysc_init_resets(ddata);
	if (error)
		goto unprepare;

	error = sysc_init_module(ddata);
	if (error)
		goto unprepare;

	pm_runtime_enable(ddata->dev);
	error = pm_runtime_resume_and_get(ddata->dev);
	if (error < 0) {
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	/* Balance use counts as PM runtime should have enabled these all */
	if (!(ddata->cfg.quirks &
	      (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
		reset_control_assert(ddata->rsts);

	sysc_show_registers(ddata);

	ddata->dev->type = &sysc_device_type;

	/* Reserved modules (e.g. active gpt12 timer) get no child device */
	if (!ddata->reserved) {
		error = of_platform_populate(ddata->dev->of_node,
					     sysc_match_table,
					     pdata ? pdata->auxdata : NULL,
					     ddata->dev);
		if (error)
			goto err;
	}

	INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);

	/* At least earlycon won't survive without deferred idle */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT |
				 SYSC_QUIRK_NO_RESET_ON_INIT)) {
		schedule_delayed_work(&ddata->idle_work, 3000);
	} else {
		pm_runtime_put(&pdev->dev);
	}

	/* Modules that lose context need restoring on CPU PM exit */
	if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
		sysc_add_restored(ddata);

	return 0;

err:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
unprepare:
	sysc_unprepare(ddata);

	return error;
}
3371 
/**
 * sysc_remove - remove a sysc interconnect target module
 * @pdev: platform device being removed
 *
 * Flushes any pending deferred-idle work, resumes the module so child
 * devices can be depopulated with clocks running, then idles it and
 * asserts its reset if not already asserted. Always returns 0.
 */
static int sysc_remove(struct platform_device *pdev)
{
	struct sysc *ddata = platform_get_drvdata(pdev);
	int error;

	/* Device can still be enabled, see deferred idle quirk in probe */
	if (cancel_delayed_work_sync(&ddata->idle_work))
		ti_sysc_idle(&ddata->idle_work.work);

	error = pm_runtime_resume_and_get(ddata->dev);
	if (error < 0) {
		/* Module unusable; just disable PM runtime and unprepare */
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	of_platform_depopulate(&pdev->dev);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* reset_control_status() returns 0 when reset is deasserted */
	if (!reset_control_status(ddata->rsts))
		reset_control_assert(ddata->rsts);

unprepare:
	sysc_unprepare(ddata);

	return 0;
}
3400 
/* Interconnect target module types handled by this driver, with per-type
 * capability data. Matched against the "ti,sysc-*" compatibles in the dtb.
 */
static const struct of_device_id sysc_match[] = {
	{ .compatible = "ti,sysc-omap2", .data = &sysc_omap2, },
	{ .compatible = "ti,sysc-omap2-timer", .data = &sysc_omap2_timer, },
	{ .compatible = "ti,sysc-omap4", .data = &sysc_omap4, },
	{ .compatible = "ti,sysc-omap4-timer", .data = &sysc_omap4_timer, },
	{ .compatible = "ti,sysc-omap4-simple", .data = &sysc_omap4_simple, },
	{ .compatible = "ti,sysc-omap3430-sr", .data = &sysc_34xx_sr, },
	{ .compatible = "ti,sysc-omap3630-sr", .data = &sysc_36xx_sr, },
	{ .compatible = "ti,sysc-omap4-sr", .data = &sysc_omap4_sr, },
	{ .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, },
	{ .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, },
	{ .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
	{ .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, },
	{ .compatible = "ti,sysc-usb-host-fs",
	  .data = &sysc_omap4_usb_host_fs, },
	{ .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
	{ .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
	{  },
};
MODULE_DEVICE_TABLE(of, sysc_match);
3421 
/* Platform driver for the ti-sysc interconnect target modules */
static struct platform_driver sysc_driver = {
	.probe		= sysc_probe,
	.remove		= sysc_remove,
	.driver         = {
		.name   = "ti-sysc",
		.of_match_table	= sysc_match,
		.pm = &sysc_pm_ops,
	},
};
3431 
3432 static int __init sysc_init(void)
3433 {
3434 	bus_register_notifier(&platform_bus_type, &sysc_nb);
3435 
3436 	return platform_driver_register(&sysc_driver);
3437 }
3438 module_init(sysc_init);
3439 
/*
 * Module exit: tear down in reverse order of sysc_init() — notifier first,
 * then the driver, then the static SoC data built during probe.
 */
static void __exit sysc_exit(void)
{
	bus_unregister_notifier(&platform_bus_type, &sysc_nb);
	platform_driver_unregister(&sysc_driver);
	sysc_cleanup_static_data();
}
module_exit(sysc_exit);
3447 
3448 MODULE_DESCRIPTION("TI sysc interconnect target driver");
3449 MODULE_LICENSE("GPL v2");
3450