xref: /openbmc/linux/arch/arm/mach-omap2/prm_common.c (revision b34e08d5)
/*
 * OMAP2+ common Power & Reset Management (PRM) IP block functions
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * For historical purposes, the API used to configure the PRM
 * interrupt handler refers to it as the "PRCM interrupt."  The
 * underlying registers are located in the PRM on OMAP3/4.
 *
 * XXX This code should eventually be moved to a PRM driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>

#include "soc.h"
#include "prm2xxx_3xxx.h"
#include "prm2xxx.h"
#include "prm3xxx.h"
#include "prm44xx.h"
#include "common.h"
#include "clock.h"

/*
 * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
 * XXX this is technically not needed, since
 * omap_prcm_register_chain_handler() could allocate this based on the
 * actual amount of memory needed for the SoC
 */
#define OMAP_PRCM_MAX_NR_PENDING_REG		2

/*
 * prcm_irq_chips: an array of all of the "generic IRQ chips" in use
 * by the PRCM interrupt handler code.  There will be one 'chip' per
 * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair.  (So OMAP3 will have
 * one "chip" and OMAP4 will have two.)
 */
static struct irq_chip_generic **prcm_irq_chips;

/*
 * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code
 * is currently running on.  Defined and passed by initialization code
 * that calls omap_prcm_register_chain_handler().
 */
static struct omap_prcm_irq_setup *prcm_irq_setup;

/* prm_base: base virtual address of the PRM IP block */
void __iomem *prm_base;

/*
 * prm_ll_data: function pointers to SoC-specific implementations of
 * common PRM functions
 */
static struct prm_ll_data null_prm_ll_data;
static struct prm_ll_data *prm_ll_data = &null_prm_ll_data;

/* Private functions */

/*
 * Move priority events from events to priority_events array
 */
static void omap_prcm_events_filter_priority(unsigned long *events,
	unsigned long *priority_events)
{
	int i;

	for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
		priority_events[i] =
			events[i] & prcm_irq_setup->priority_mask[i];
		events[i] ^= priority_events[i];
	}
}
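
/*
 * Worked example (illustrative, not part of the original code): with a
 * single pending register, events[0] = 0x5 (bits 0 and 2 pending) and
 * priority_mask[0] = 0x1 (bit 0 marked priority), the filter above
 * leaves priority_events[0] = 0x1 and events[0] = 0x4, so bit 0 is
 * dispatched in the priority pass of omap_prcm_irq_handler() and bit 2
 * in the normal pass.
 */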

/*
 * PRCM Interrupt Handler
 *
 * This is a common handler for the OMAP PRCM interrupts. Pending
 * interrupts are detected by a call to the SoC-specific
 * read_pending_irqs() hook and dispatched accordingly. Clearing of
 * the wakeup events should be done by the SoC-specific individual
 * handlers.
 */
static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virtirq;
	int nr_irq = prcm_irq_setup->nr_regs * 32;

	/*
	 * If we are suspended, mask all interrupts at the PRCM level;
	 * this does not ack them, so they will remain pending until we
	 * re-enable the interrupts, at which point
	 * omap_prcm_irq_handler() will be executed again.  The
	 * save_and_clear_irqen() hook must ensure that the PRM write
	 * to disable all IRQs has reached the PRM before returning, or
	 * spurious PRCM interrupts may occur during suspend.
	 */
	if (prcm_irq_setup->suspended) {
		prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
		prcm_irq_setup->suspend_save_flag = true;
	}

	/*
	 * Loop until all pending irqs are handled, since
	 * generic_handle_irq() can cause new irqs to come
	 */
	while (!prcm_irq_setup->suspended) {
		prcm_irq_setup->read_pending_irqs(pending);

		/* If no bits are set, all IRQs have been handled */
		if (find_first_bit(pending, nr_irq) >= nr_irq)
			break;

		omap_prcm_events_filter_priority(pending, priority_pending);

		/*
		 * Loop on all currently pending irqs so that new irqs
		 * cannot starve previously pending irqs
		 */

		/* Serve priority events first */
		for_each_set_bit(virtirq, priority_pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);

		/* Serve normal events next */
		for_each_set_bit(virtirq, pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
	}
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
	chip->irq_unmask(&desc->irq_data);

	prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
}

/* Public functions */

/**
 * omap_prcm_event_to_irq - given a PRCM event name, returns the
 * corresponding IRQ on which the handler should be registered
 * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
 *
 * Returns the Linux internal IRQ ID corresponding to @name upon success,
 * or -ENOENT upon failure.
 */
int omap_prcm_event_to_irq(const char *name)
{
	int i;

	if (!prcm_irq_setup || !name)
		return -ENOENT;

	for (i = 0; i < prcm_irq_setup->nr_irqs; i++)
		if (!strcmp(prcm_irq_setup->irqs[i].name, name))
			return prcm_irq_setup->base_irq +
				prcm_irq_setup->irqs[i].offset;

	return -ENOENT;
}
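
/*
 * Usage sketch (illustrative, not part of the original code): a caller
 * that wants to handle a PRM event registers an ordinary handler on
 * the Linux IRQ returned for an event name declared in the SoC's
 * omap_prcm_irq table.  The event name "wkup", the handler name and
 * the device-name string below are assumptions made for this sketch:
 *
 *	int irq = omap_prcm_event_to_irq("wkup");
 *
 *	if (irq >= 0)
 *		ret = request_irq(irq, my_prcm_wkup_handler,
 *				  IRQF_NO_SUSPEND, "pm_wkup", NULL);
 *
 * where my_prcm_wkup_handler() is an irq_handler_t that clears the
 * wakeup source and returns IRQ_HANDLED.
 */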

/**
 * omap_prcm_irq_cleanup - reverse the memory allocations and other
 * steps done by omap_prcm_register_chain_handler()
 *
 * No return value.
 */
void omap_prcm_irq_cleanup(void)
{
	int i;

	if (!prcm_irq_setup) {
		pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n");
		return;
	}

	if (prcm_irq_chips) {
		for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
			if (prcm_irq_chips[i])
				irq_remove_generic_chip(prcm_irq_chips[i],
					0xffffffff, 0, 0);
			prcm_irq_chips[i] = NULL;
		}
		kfree(prcm_irq_chips);
		prcm_irq_chips = NULL;
	}

	kfree(prcm_irq_setup->saved_mask);
	prcm_irq_setup->saved_mask = NULL;

	kfree(prcm_irq_setup->priority_mask);
	prcm_irq_setup->priority_mask = NULL;

	irq_set_chained_handler(prcm_irq_setup->irq, NULL);

	if (prcm_irq_setup->base_irq > 0)
		irq_free_descs(prcm_irq_setup->base_irq,
			prcm_irq_setup->nr_regs * 32);
	prcm_irq_setup->base_irq = 0;
}

/**
 * omap_prcm_irq_prepare - tell the PRCM interrupt handler that suspend
 * has started
 *
 * Called by the platform suspend code before entering suspend, so that
 * omap_prcm_irq_handler() saves and masks the PRM IRQ enables instead
 * of dispatching events.  No return value.
 */
void omap_prcm_irq_prepare(void)
{
	prcm_irq_setup->suspended = true;
}

/**
 * omap_prcm_irq_complete - tell the PRCM interrupt handler that suspend
 * has ended
 *
 * Called by the platform suspend code after leaving suspend; restores
 * the PRM IRQ enable state saved while suspended.  No return value.
 */
void omap_prcm_irq_complete(void)
{
	prcm_irq_setup->suspended = false;

	/* If we have not saved the masks, do not attempt to restore */
	if (!prcm_irq_setup->suspend_save_flag)
		return;

	prcm_irq_setup->suspend_save_flag = false;

	/*
	 * Re-enable all masked PRCM irq sources; this causes the PRCM
	 * interrupt to fire immediately if any events were masked
	 * earlier in the chained handler
	 */
	prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask);
}
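
/*
 * Usage sketch (illustrative, not part of the original code): the
 * platform suspend path is expected to bracket suspend with the two
 * calls above.  The my_pm_*() names are placeholders:
 *
 *	static void my_pm_begin(void)
 *	{
 *		omap_prcm_irq_prepare();
 *	}
 *
 *	static void my_pm_end(void)
 *	{
 *		omap_prcm_irq_complete();
 *	}
 *
 * On OMAP these calls are made from the SoC power management code's
 * suspend begin/end callbacks.
 */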

/**
 * omap_prcm_register_chain_handler - initializes the prcm chained interrupt
 * handler based on provided parameters
 * @irq_setup: hardware data about the underlying PRM/PRCM
 *
 * Set up the PRCM chained interrupt handler on the PRCM IRQ.  Sets up
 * one generic IRQ chip per PRM interrupt status/enable register pair.
 * Returns 0 upon success, -EINVAL if called twice or if invalid
 * arguments are passed, or -ENOMEM on any other error.
 */
int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
{
	int nr_regs;
	u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
	int offset, i;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	if (!irq_setup)
		return -EINVAL;

	nr_regs = irq_setup->nr_regs;

	if (prcm_irq_setup) {
		pr_err("PRCM: already initialized; won't reinitialize\n");
		return -EINVAL;
	}

	if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) {
		pr_err("PRCM: nr_regs too large\n");
		return -EINVAL;
	}

	prcm_irq_setup = irq_setup;

	prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL);
	prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL);
	prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs,
		GFP_KERNEL);

	if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
	    !prcm_irq_setup->priority_mask) {
		pr_err("PRCM: kzalloc failed\n");
		goto err;
	}

	memset(mask, 0, sizeof(mask));

	for (i = 0; i < irq_setup->nr_irqs; i++) {
		offset = irq_setup->irqs[i].offset;
		mask[offset >> 5] |= 1 << (offset & 0x1f);
		if (irq_setup->irqs[i].priority)
			irq_setup->priority_mask[offset >> 5] |=
				1 << (offset & 0x1f);
	}

	irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);

	irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
		0);

	if (irq_setup->base_irq < 0) {
		pr_err("PRCM: failed to allocate irq descs: %d\n",
			irq_setup->base_irq);
		goto err;
	}

	for (i = 0; i < irq_setup->nr_regs; i++) {
		gc = irq_alloc_generic_chip("PRCM", 1,
			irq_setup->base_irq + i * 32, prm_base,
			handle_level_irq);

		if (!gc) {
			pr_err("PRCM: failed to allocate generic chip\n");
			goto err;
		}
		ct = gc->chip_types;
		ct->chip.irq_ack = irq_gc_ack_set_bit;
		ct->chip.irq_mask = irq_gc_mask_clr_bit;
		ct->chip.irq_unmask = irq_gc_mask_set_bit;

		ct->regs.ack = irq_setup->ack + i * 4;
		ct->regs.mask = irq_setup->mask + i * 4;

		irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0);
		prcm_irq_chips[i] = gc;
	}

	if (of_have_populated_dt()) {
		int irq = omap_prcm_event_to_irq("io");
		if (cpu_is_omap34xx())
			omap_pcs_legacy_init(irq,
				omap3xxx_prm_reconfigure_io_chain);
		else
			omap_pcs_legacy_init(irq,
				omap44xx_prm_reconfigure_io_chain);
	}

	return 0;

err:
	omap_prcm_irq_cleanup();
	return -ENOMEM;
}
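
/*
 * Registration sketch (illustrative, not part of the original code):
 * SoC init code fills in a struct omap_prcm_irq_setup describing its
 * PRM_IRQ{STATUS,ENABLE}_MPU layout and hooks, then hands it to
 * omap_prcm_register_chain_handler().  The register offsets, IRQ
 * number, event offsets and hook names below are placeholders,
 * loosely modeled on a single-register (OMAP3-style) configuration:
 *
 *	static struct omap_prcm_irq my_prcm_irqs[] = {
 *		{ .name = "wkup", .offset = 0, .priority = false },
 *		{ .name = "io",   .offset = 9, .priority = true },
 *	};
 *
 *	static struct omap_prcm_irq_setup my_prcm_irq_setup = {
 *		.ack			= MY_PRM_IRQSTATUS_MPU_OFFSET,
 *		.mask			= MY_PRM_IRQENABLE_MPU_OFFSET,
 *		.nr_regs		= 1,
 *		.irqs			= my_prcm_irqs,
 *		.nr_irqs		= ARRAY_SIZE(my_prcm_irqs),
 *		.irq			= MY_PRCM_MPU_IRQ,
 *		.read_pending_irqs	= &my_prm_read_pending_irqs,
 *		.ocp_barrier		= &my_prm_ocp_barrier,
 *		.save_and_clear_irqen	= &my_prm_save_and_clear_irqen,
 *		.restore_irqen		= &my_prm_restore_irqen,
 *	};
 *
 *	...
 *	omap_prcm_register_chain_handler(&my_prcm_irq_setup);
 */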

/**
 * omap2_set_globals_prm - set the PRM base address (for early use)
 * @prm: PRM base virtual address
 *
 * XXX Will be replaced when the PRM/CM drivers are completed.
 */
void __init omap2_set_globals_prm(void __iomem *prm)
{
	prm_base = prm;
}

/**
 * prm_read_reset_sources - return the sources of the SoC's last reset
 *
 * Return a u32 bitmask representing the reset sources that caused the
 * SoC to reset.  The low-level per-SoC functions called by this
 * function remap the SoC-specific reset source bits into an
 * OMAP-common set of reset source bits, defined in
 * arch/arm/mach-omap2/prm.h.  Returns the standardized reset source
 * u32 bitmask from the hardware upon success, or returns (1 <<
 * OMAP_UNKNOWN_RST_SRC_ID_SHIFT) if no low-level read_reset_sources()
 * function was registered.
 */
u32 prm_read_reset_sources(void)
{
	u32 ret = 1 << OMAP_UNKNOWN_RST_SRC_ID_SHIFT;

	if (prm_ll_data->read_reset_sources)
		ret = prm_ll_data->read_reset_sources();
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined for reset sources\n", __func__);

	return ret;
}
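
/*
 * Usage sketch (illustrative, not part of the original code): callers
 * test individual OMAP_*_RST_SRC_ID_SHIFT bits from
 * arch/arm/mach-omap2/prm.h against the returned bitmask, e.g. to
 * distinguish a recognized reset cause from the "unknown" fallback:
 *
 *	u32 sources = prm_read_reset_sources();
 *
 *	if (sources & (1 << OMAP_UNKNOWN_RST_SRC_ID_SHIFT))
 *		pr_info("reset source could not be determined\n");
 *	else
 *		pr_info("reset sources: 0x%x\n", sources);
 */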

/**
 * prm_was_any_context_lost_old - was device context lost? (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Return true if any bits were set in the *_CONTEXT_* register
 * identified by (@part, @inst, @idx), which means that some context
 * was lost for that module; otherwise, return false.  XXX Deprecated;
 * callers need to use a less-SoC-dependent way to identify hardware
 * IP blocks.
 */
bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx)
{
	bool ret = true;

	if (prm_ll_data->was_any_context_lost_old)
		ret = prm_ll_data->was_any_context_lost_old(part, inst, idx);
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);

	return ret;
}

/**
 * prm_clear_context_loss_flags_old - clear context loss flags (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Clear hardware context loss bits for the module identified by
 * (@part, @inst, @idx).  No return value.  XXX Deprecated; callers
 * need to use a less-SoC-dependent way to identify hardware IP
 * blocks.
 */
void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx)
{
	if (prm_ll_data->clear_context_loss_flags_old)
		prm_ll_data->clear_context_loss_flags_old(part, inst, idx);
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
}
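
/*
 * Usage sketch (illustrative, not part of the original code): a
 * typical caller checks for context loss after a power transition and
 * then clears the flags so that the next check starts clean.  The
 * MY_PART, MY_INST and MY_CONTEXT_OFFS values stand in for the
 * SoC-specific partition ID, instance offset and CONTEXT register
 * offset:
 *
 *	if (prm_was_any_context_lost_old(MY_PART, MY_INST,
 *					 MY_CONTEXT_OFFS)) {
 *		my_restore_module_context();
 *		prm_clear_context_loss_flags_old(MY_PART, MY_INST,
 *						 MY_CONTEXT_OFFS);
 *	}
 */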

/**
 * prm_register - register per-SoC low-level data with the PRM
 * @pld: low-level per-SoC OMAP PRM data & function pointers to register
 *
 * Register per-SoC low-level OMAP PRM data and function pointers with
 * the OMAP PRM common interface.  The caller must keep the data
 * pointed to by @pld valid until it calls prm_unregister() and
 * it returns successfully.  Returns 0 upon success, -EINVAL if @pld
 * is NULL, or -EEXIST if prm_register() has already been called
 * without an intervening prm_unregister().
 */
int prm_register(struct prm_ll_data *pld)
{
	if (!pld)
		return -EINVAL;

	if (prm_ll_data != &null_prm_ll_data)
		return -EEXIST;

	prm_ll_data = pld;

	return 0;
}

/**
 * prm_unregister - unregister per-SoC low-level data & function pointers
 * @pld: low-level per-SoC OMAP PRM data & function pointers to unregister
 *
 * Unregister per-SoC low-level OMAP PRM data and function pointers
 * that were previously registered with prm_register().  The
 * caller may not destroy any of the data pointed to by @pld until
 * this function returns successfully.  Returns 0 upon success, or
 * -EINVAL if @pld is NULL or if @pld does not match the struct
 * prm_ll_data * previously registered by prm_register().
 */
int prm_unregister(struct prm_ll_data *pld)
{
	if (!pld || prm_ll_data != pld)
		return -EINVAL;

	prm_ll_data = &null_prm_ll_data;

	return 0;
}
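
/*
 * Registration sketch (illustrative, not part of the original code):
 * SoC-specific PRM code provides the function pointers used by the
 * wrappers above and registers them once at init time.  The my_*()
 * names are placeholders:
 *
 *	static struct prm_ll_data my_prm_ll_data = {
 *		.read_reset_sources		= &my_prm_read_reset_sources,
 *		.was_any_context_lost_old	= &my_was_any_context_lost,
 *		.clear_context_loss_flags_old	= &my_clear_context_loss_flags,
 *	};
 *
 *	int __init my_prm_init(void)
 *	{
 *		return prm_register(&my_prm_ll_data);
 *	}
 *
 * with a matching prm_unregister(&my_prm_ll_data) call if the
 * SoC-specific code is ever torn down.
 */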

static const struct of_device_id omap_prcm_dt_match_table[] = {
	{ .compatible = "ti,am3-prcm" },
	{ .compatible = "ti,am3-scrm" },
	{ .compatible = "ti,am4-prcm" },
	{ .compatible = "ti,am4-scrm" },
	{ .compatible = "ti,omap3-prm" },
	{ .compatible = "ti,omap3-cm" },
	{ .compatible = "ti,omap3-scrm" },
	{ .compatible = "ti,omap4-cm1" },
	{ .compatible = "ti,omap4-prm" },
	{ .compatible = "ti,omap4-cm2" },
	{ .compatible = "ti,omap4-scrm" },
	{ .compatible = "ti,omap5-prm" },
	{ .compatible = "ti,omap5-cm-core-aon" },
	{ .compatible = "ti,omap5-scrm" },
	{ .compatible = "ti,omap5-cm-core" },
	{ .compatible = "ti,dra7-prm" },
	{ .compatible = "ti,dra7-cm-core-aon" },
	{ .compatible = "ti,dra7-cm-core" },
	{ }
};
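
/*
 * Illustrative device tree fragment (not part of the original code):
 * a node that of_prcm_init() below would match, map and register as a
 * clock provider.  The unit address and size are placeholders:
 *
 *	prm: prm@4a306000 {
 *		compatible = "ti,omap4-prm";
 *		reg = <0x4a306000 0x3000>;
 *	};
 */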

/*
 * memmap_dummy_ck: dummy clock used by the low-level clock register
 * accessors below; its MEMMAP_ADDRESSING flag tells omap2_clk_readl()
 * and omap2_clk_writel() to use the I/O mappings recorded in
 * clk_memmaps[] by of_prcm_init()
 */
static struct clk_hw_omap memmap_dummy_ck = {
	.flags = MEMMAP_ADDRESSING,
};

static u32 prm_clk_readl(void __iomem *reg)
{
	return omap2_clk_readl(&memmap_dummy_ck, reg);
}

static void prm_clk_writel(u32 val, void __iomem *reg)
{
	omap2_clk_writel(val, &memmap_dummy_ck, reg);
}

static struct ti_clk_ll_ops omap_clk_ll_ops = {
	.clk_readl = prm_clk_readl,
	.clk_writel = prm_clk_writel,
};

/**
 * of_prcm_init - initialize PRCM clock support from the device tree
 *
 * Map the register space of each PRCM/CM/SCRM node matched by
 * omap_prcm_dt_match_table, register each node as a TI clock
 * provider, and then set up the DT-defined clockdomains.  Returns 0.
 */
int __init of_prcm_init(void)
{
	struct device_node *np;
	void __iomem *mem;
	int memmap_index = 0;

	ti_clk_ll_ops = &omap_clk_ll_ops;

	for_each_matching_node(np, omap_prcm_dt_match_table) {
		mem = of_iomap(np, 0);
		clk_memmaps[memmap_index] = mem;
		ti_dt_clk_init_provider(np, memmap_index);
		memmap_index++;
	}

	ti_dt_clockdomains_setup();

	return 0;
}