1 /*
2  * Copyright (C) STMicroelectronics 2009
3  * Copyright (C) ST-Ericsson SA 2010
4  *
5  * License Terms: GNU General Public License v2
6  * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
7  * Author: Sundar Iyer <sundar.iyer@stericsson.com>
8  * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
9  *
10  * U8500 PRCM Unit interface driver
11  *
12  */
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/delay.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/spinlock.h>
19 #include <linux/io.h>
20 #include <linux/slab.h>
21 #include <linux/mutex.h>
22 #include <linux/completion.h>
23 #include <linux/irq.h>
24 #include <linux/jiffies.h>
25 #include <linux/bitops.h>
26 #include <linux/fs.h>
27 #include <linux/platform_device.h>
28 #include <linux/uaccess.h>
29 #include <linux/mfd/core.h>
30 #include <linux/mfd/db8500-prcmu.h>
31 #include <mach/hardware.h>
32 #include <mach/irqs.h>
33 #include <mach/db8500-regs.h>
34 #include <mach/id.h>
35 #include "db8500-prcmu-regs.h"
36 
37 /* Offset for the firmware version within the TCPM */
38 #define PRCMU_FW_VERSION_OFFSET 0xA4
39 
40 /* PRCMU project numbers, defined by PRCMU FW */
41 #define PRCMU_PROJECT_ID_8500V1_0 1
42 #define PRCMU_PROJECT_ID_8500V2_0 2
43 #define PRCMU_PROJECT_ID_8400V2_0 3
44 
45 /* Index of different voltages to be used when accessing AVSData */
46 #define PRCM_AVS_BASE		0x2FC
47 #define PRCM_AVS_VBB_RET	(PRCM_AVS_BASE + 0x0)
48 #define PRCM_AVS_VBB_MAX_OPP	(PRCM_AVS_BASE + 0x1)
49 #define PRCM_AVS_VBB_100_OPP	(PRCM_AVS_BASE + 0x2)
50 #define PRCM_AVS_VBB_50_OPP	(PRCM_AVS_BASE + 0x3)
51 #define PRCM_AVS_VARM_MAX_OPP	(PRCM_AVS_BASE + 0x4)
52 #define PRCM_AVS_VARM_100_OPP	(PRCM_AVS_BASE + 0x5)
53 #define PRCM_AVS_VARM_50_OPP	(PRCM_AVS_BASE + 0x6)
54 #define PRCM_AVS_VARM_RET	(PRCM_AVS_BASE + 0x7)
55 #define PRCM_AVS_VAPE_100_OPP	(PRCM_AVS_BASE + 0x8)
56 #define PRCM_AVS_VAPE_50_OPP	(PRCM_AVS_BASE + 0x9)
57 #define PRCM_AVS_VMOD_100_OPP	(PRCM_AVS_BASE + 0xA)
58 #define PRCM_AVS_VMOD_50_OPP	(PRCM_AVS_BASE + 0xB)
59 #define PRCM_AVS_VSAFE		(PRCM_AVS_BASE + 0xC)
60 
61 #define PRCM_AVS_VOLTAGE		0
62 #define PRCM_AVS_VOLTAGE_MASK		0x3f
63 #define PRCM_AVS_ISSLOWSTARTUP		6
64 #define PRCM_AVS_ISSLOWSTARTUP_MASK	(1 << PRCM_AVS_ISSLOWSTARTUP)
65 #define PRCM_AVS_ISMODEENABLE		7
66 #define PRCM_AVS_ISMODEENABLE_MASK	(1 << PRCM_AVS_ISMODEENABLE)
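
/*
 * Illustrative sketch (not part of the driver): decoding one AVS byte read
 * from TCDM with the masks defined above.
 *
 *	u8 avs = readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP);
 *	u8 voltage = avs & PRCM_AVS_VOLTAGE_MASK;               bits [5:0]
 *	bool slow_startup = avs & PRCM_AVS_ISSLOWSTARTUP_MASK;  bit 6
 *	bool mode_enabled = avs & PRCM_AVS_ISMODEENABLE_MASK;   bit 7
 *
 * prcmu_has_arm_maxopp() below performs exactly the ISMODEENABLE check.
 */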
67 
68 #define PRCM_BOOT_STATUS	0xFFF
69 #define PRCM_ROMCODE_A2P	0xFFE
70 #define PRCM_ROMCODE_P2A	0xFFD
71 #define PRCM_XP70_CUR_PWR_STATE 0xFFC      /* 4 BYTES */
72 
73 #define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */
74 
75 #define _PRCM_MBOX_HEADER		0xFE8 /* 16 bytes */
76 #define PRCM_MBOX_HEADER_REQ_MB0	(_PRCM_MBOX_HEADER + 0x0)
77 #define PRCM_MBOX_HEADER_REQ_MB1	(_PRCM_MBOX_HEADER + 0x1)
78 #define PRCM_MBOX_HEADER_REQ_MB2	(_PRCM_MBOX_HEADER + 0x2)
79 #define PRCM_MBOX_HEADER_REQ_MB3	(_PRCM_MBOX_HEADER + 0x3)
80 #define PRCM_MBOX_HEADER_REQ_MB4	(_PRCM_MBOX_HEADER + 0x4)
81 #define PRCM_MBOX_HEADER_REQ_MB5	(_PRCM_MBOX_HEADER + 0x5)
82 #define PRCM_MBOX_HEADER_ACK_MB0	(_PRCM_MBOX_HEADER + 0x8)
83 
84 /* Req Mailboxes */
85 #define PRCM_REQ_MB0 0xFDC /* 12 bytes  */
86 #define PRCM_REQ_MB1 0xFD0 /* 12 bytes  */
87 #define PRCM_REQ_MB2 0xFC0 /* 16 bytes  */
88 #define PRCM_REQ_MB3 0xE4C /* 372 bytes  */
89 #define PRCM_REQ_MB4 0xE48 /* 4 bytes  */
90 #define PRCM_REQ_MB5 0xE44 /* 4 bytes  */
91 
92 /* Ack Mailboxes */
93 #define PRCM_ACK_MB0 0xE08 /* 52 bytes  */
94 #define PRCM_ACK_MB1 0xE04 /* 4 bytes */
95 #define PRCM_ACK_MB2 0xE00 /* 4 bytes */
96 #define PRCM_ACK_MB3 0xDFC /* 4 bytes */
97 #define PRCM_ACK_MB4 0xDF8 /* 4 bytes */
98 #define PRCM_ACK_MB5 0xDF4 /* 4 bytes */
99 
100 /* Mailbox 0 headers */
101 #define MB0H_POWER_STATE_TRANS		0
102 #define MB0H_CONFIG_WAKEUPS_EXE		1
103 #define MB0H_READ_WAKEUP_ACK		3
104 #define MB0H_CONFIG_WAKEUPS_SLEEP	4
105 
106 #define MB0H_WAKEUP_EXE 2
107 #define MB0H_WAKEUP_SLEEP 5
108 
109 /* Mailbox 0 REQs */
110 #define PRCM_REQ_MB0_AP_POWER_STATE	(PRCM_REQ_MB0 + 0x0)
111 #define PRCM_REQ_MB0_AP_PLL_STATE	(PRCM_REQ_MB0 + 0x1)
112 #define PRCM_REQ_MB0_ULP_CLOCK_STATE	(PRCM_REQ_MB0 + 0x2)
113 #define PRCM_REQ_MB0_DO_NOT_WFI		(PRCM_REQ_MB0 + 0x3)
114 #define PRCM_REQ_MB0_WAKEUP_8500	(PRCM_REQ_MB0 + 0x4)
115 #define PRCM_REQ_MB0_WAKEUP_4500	(PRCM_REQ_MB0 + 0x8)
116 
117 /* Mailbox 0 ACKs */
118 #define PRCM_ACK_MB0_AP_PWRSTTR_STATUS	(PRCM_ACK_MB0 + 0x0)
119 #define PRCM_ACK_MB0_READ_POINTER	(PRCM_ACK_MB0 + 0x1)
120 #define PRCM_ACK_MB0_WAKEUP_0_8500	(PRCM_ACK_MB0 + 0x4)
121 #define PRCM_ACK_MB0_WAKEUP_0_4500	(PRCM_ACK_MB0 + 0x8)
122 #define PRCM_ACK_MB0_WAKEUP_1_8500	(PRCM_ACK_MB0 + 0x1C)
123 #define PRCM_ACK_MB0_WAKEUP_1_4500	(PRCM_ACK_MB0 + 0x20)
124 #define PRCM_ACK_MB0_EVENT_4500_NUMBERS	20
125 
126 /* Mailbox 1 headers */
127 #define MB1H_ARM_APE_OPP 0x0
128 #define MB1H_RESET_MODEM 0x2
129 #define MB1H_REQUEST_APE_OPP_100_VOLT 0x3
130 #define MB1H_RELEASE_APE_OPP_100_VOLT 0x4
131 #define MB1H_RELEASE_USB_WAKEUP 0x5
132 
133 /* Mailbox 1 Requests */
134 #define PRCM_REQ_MB1_ARM_OPP			(PRCM_REQ_MB1 + 0x0)
135 #define PRCM_REQ_MB1_APE_OPP			(PRCM_REQ_MB1 + 0x1)
136 #define PRCM_REQ_MB1_APE_OPP_100_RESTORE	(PRCM_REQ_MB1 + 0x4)
137 #define PRCM_REQ_MB1_ARM_OPP_100_RESTORE	(PRCM_REQ_MB1 + 0x8)
138 
139 /* Mailbox 1 ACKs */
140 #define PRCM_ACK_MB1_CURRENT_ARM_OPP	(PRCM_ACK_MB1 + 0x0)
141 #define PRCM_ACK_MB1_CURRENT_APE_OPP	(PRCM_ACK_MB1 + 0x1)
142 #define PRCM_ACK_MB1_APE_VOLTAGE_STATUS	(PRCM_ACK_MB1 + 0x2)
143 #define PRCM_ACK_MB1_DVFS_STATUS	(PRCM_ACK_MB1 + 0x3)
144 
145 /* Mailbox 2 headers */
146 #define MB2H_DPS	0x0
147 #define MB2H_AUTO_PWR	0x1
148 
149 /* Mailbox 2 REQs */
150 #define PRCM_REQ_MB2_SVA_MMDSP		(PRCM_REQ_MB2 + 0x0)
151 #define PRCM_REQ_MB2_SVA_PIPE		(PRCM_REQ_MB2 + 0x1)
152 #define PRCM_REQ_MB2_SIA_MMDSP		(PRCM_REQ_MB2 + 0x2)
153 #define PRCM_REQ_MB2_SIA_PIPE		(PRCM_REQ_MB2 + 0x3)
154 #define PRCM_REQ_MB2_SGA		(PRCM_REQ_MB2 + 0x4)
155 #define PRCM_REQ_MB2_B2R2_MCDE		(PRCM_REQ_MB2 + 0x5)
156 #define PRCM_REQ_MB2_ESRAM12		(PRCM_REQ_MB2 + 0x6)
157 #define PRCM_REQ_MB2_ESRAM34		(PRCM_REQ_MB2 + 0x7)
158 #define PRCM_REQ_MB2_AUTO_PM_SLEEP	(PRCM_REQ_MB2 + 0x8)
159 #define PRCM_REQ_MB2_AUTO_PM_IDLE	(PRCM_REQ_MB2 + 0xC)
160 
161 /* Mailbox 2 ACKs */
162 #define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0)
163 #define HWACC_PWR_ST_OK 0xFE
164 
165 /* Mailbox 3 headers */
166 #define MB3H_ANC	0x0
167 #define MB3H_SIDETONE	0x1
168 #define MB3H_SYSCLK	0xE
169 
170 /* Mailbox 3 Requests */
171 #define PRCM_REQ_MB3_ANC_FIR_COEFF	(PRCM_REQ_MB3 + 0x0)
172 #define PRCM_REQ_MB3_ANC_IIR_COEFF	(PRCM_REQ_MB3 + 0x20)
173 #define PRCM_REQ_MB3_ANC_SHIFTER	(PRCM_REQ_MB3 + 0x60)
174 #define PRCM_REQ_MB3_ANC_WARP		(PRCM_REQ_MB3 + 0x64)
175 #define PRCM_REQ_MB3_SIDETONE_FIR_GAIN	(PRCM_REQ_MB3 + 0x68)
176 #define PRCM_REQ_MB3_SIDETONE_FIR_COEFF	(PRCM_REQ_MB3 + 0x6C)
177 #define PRCM_REQ_MB3_SYSCLK_MGT		(PRCM_REQ_MB3 + 0x16C)
178 
179 /* Mailbox 4 headers */
180 #define MB4H_DDR_INIT	0x0
181 #define MB4H_MEM_ST	0x1
182 #define MB4H_HOTDOG	0x12
183 #define MB4H_HOTMON	0x13
184 #define MB4H_HOT_PERIOD	0x14
185 
186 /* Mailbox 4 Requests */
187 #define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE	(PRCM_REQ_MB4 + 0x0)
188 #define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE	(PRCM_REQ_MB4 + 0x1)
189 #define PRCM_REQ_MB4_ESRAM0_ST			(PRCM_REQ_MB4 + 0x3)
190 #define PRCM_REQ_MB4_HOTDOG_THRESHOLD		(PRCM_REQ_MB4 + 0x0)
191 #define PRCM_REQ_MB4_HOTMON_LOW			(PRCM_REQ_MB4 + 0x0)
192 #define PRCM_REQ_MB4_HOTMON_HIGH		(PRCM_REQ_MB4 + 0x1)
193 #define PRCM_REQ_MB4_HOTMON_CONFIG		(PRCM_REQ_MB4 + 0x2)
194 #define PRCM_REQ_MB4_HOT_PERIOD			(PRCM_REQ_MB4 + 0x0)
195 #define HOTMON_CONFIG_LOW			BIT(0)
196 #define HOTMON_CONFIG_HIGH			BIT(1)
197 
198 /* Mailbox 5 Requests */
199 #define PRCM_REQ_MB5_I2C_SLAVE_OP	(PRCM_REQ_MB5 + 0x0)
200 #define PRCM_REQ_MB5_I2C_HW_BITS	(PRCM_REQ_MB5 + 0x1)
201 #define PRCM_REQ_MB5_I2C_REG		(PRCM_REQ_MB5 + 0x2)
202 #define PRCM_REQ_MB5_I2C_VAL		(PRCM_REQ_MB5 + 0x3)
203 #define PRCMU_I2C_WRITE(slave) \
204 	(((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
205 #define PRCMU_I2C_READ(slave) \
206 	(((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0))
207 #define PRCMU_I2C_STOP_EN		BIT(3)
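
/*
 * Worked example (for illustration only): for an ABB slave address of 0x0E
 * on a u8500v2, the macros above give
 *
 *	PRCMU_I2C_WRITE(0x0E) = (0x0E << 1) | BIT(6)          = 0x5C
 *	PRCMU_I2C_READ(0x0E)  = (0x0E << 1) | BIT(0) | BIT(6) = 0x5D
 *
 * i.e. BIT(0) selects a read, and BIT(6) is only set on u8500v2.
 */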
208 
209 /* Mailbox 5 ACKs */
210 #define PRCM_ACK_MB5_I2C_STATUS	(PRCM_ACK_MB5 + 0x1)
211 #define PRCM_ACK_MB5_I2C_VAL	(PRCM_ACK_MB5 + 0x3)
212 #define I2C_WR_OK 0x1
213 #define I2C_RD_OK 0x2
214 
215 #define NUM_MB 8
216 #define MBOX_BIT BIT
217 #define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1)
218 
219 /*
220  * Wakeups/IRQs
221  */
222 
223 #define WAKEUP_BIT_RTC BIT(0)
224 #define WAKEUP_BIT_RTT0 BIT(1)
225 #define WAKEUP_BIT_RTT1 BIT(2)
226 #define WAKEUP_BIT_HSI0 BIT(3)
227 #define WAKEUP_BIT_HSI1 BIT(4)
228 #define WAKEUP_BIT_CA_WAKE BIT(5)
229 #define WAKEUP_BIT_USB BIT(6)
230 #define WAKEUP_BIT_ABB BIT(7)
231 #define WAKEUP_BIT_ABB_FIFO BIT(8)
232 #define WAKEUP_BIT_SYSCLK_OK BIT(9)
233 #define WAKEUP_BIT_CA_SLEEP BIT(10)
234 #define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
235 #define WAKEUP_BIT_SIDE_TONE_OK BIT(12)
236 #define WAKEUP_BIT_ANC_OK BIT(13)
237 #define WAKEUP_BIT_SW_ERROR BIT(14)
238 #define WAKEUP_BIT_AC_SLEEP_ACK BIT(15)
239 #define WAKEUP_BIT_ARM BIT(17)
240 #define WAKEUP_BIT_HOTMON_LOW BIT(18)
241 #define WAKEUP_BIT_HOTMON_HIGH BIT(19)
242 #define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
243 #define WAKEUP_BIT_GPIO0 BIT(23)
244 #define WAKEUP_BIT_GPIO1 BIT(24)
245 #define WAKEUP_BIT_GPIO2 BIT(25)
246 #define WAKEUP_BIT_GPIO3 BIT(26)
247 #define WAKEUP_BIT_GPIO4 BIT(27)
248 #define WAKEUP_BIT_GPIO5 BIT(28)
249 #define WAKEUP_BIT_GPIO6 BIT(29)
250 #define WAKEUP_BIT_GPIO7 BIT(30)
251 #define WAKEUP_BIT_GPIO8 BIT(31)
252 
253 /*
254  * This vector maps irq numbers to the bits in the bit field used in
255  * communication with the PRCMU firmware.
256  *
257  * The reason for having this is to keep the irq numbers contiguous even though
258  * the bits in the bit field are not. (The bits also have a tendency to move
259  * around, to further complicate matters.)
260  */
261 #define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name) - IRQ_PRCMU_BASE)
262 #define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
263 static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = {
264 	IRQ_ENTRY(RTC),
265 	IRQ_ENTRY(RTT0),
266 	IRQ_ENTRY(RTT1),
267 	IRQ_ENTRY(HSI0),
268 	IRQ_ENTRY(HSI1),
269 	IRQ_ENTRY(CA_WAKE),
270 	IRQ_ENTRY(USB),
271 	IRQ_ENTRY(ABB),
272 	IRQ_ENTRY(ABB_FIFO),
273 	IRQ_ENTRY(CA_SLEEP),
274 	IRQ_ENTRY(ARM),
275 	IRQ_ENTRY(HOTMON_LOW),
276 	IRQ_ENTRY(HOTMON_HIGH),
277 	IRQ_ENTRY(MODEM_SW_RESET_REQ),
278 	IRQ_ENTRY(GPIO0),
279 	IRQ_ENTRY(GPIO1),
280 	IRQ_ENTRY(GPIO2),
281 	IRQ_ENTRY(GPIO3),
282 	IRQ_ENTRY(GPIO4),
283 	IRQ_ENTRY(GPIO5),
284 	IRQ_ENTRY(GPIO6),
285 	IRQ_ENTRY(GPIO7),
286 	IRQ_ENTRY(GPIO8)
287 };
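
/*
 * For illustration: IRQ_ENTRY(RTC) above expands (roughly) to
 *
 *	[IRQ_PRCMU_RTC - IRQ_PRCMU_BASE] = WAKEUP_BIT_RTC
 *
 * so prcmu_irq_bit[] can be indexed with (irq - IRQ_PRCMU_BASE), which is
 * what prcmu_irq_mask()/prcmu_irq_unmask() and read_mailbox_0() do below.
 */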
288 
289 #define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
290 #define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
291 static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
292 	WAKEUP_ENTRY(RTC),
293 	WAKEUP_ENTRY(RTT0),
294 	WAKEUP_ENTRY(RTT1),
295 	WAKEUP_ENTRY(HSI0),
296 	WAKEUP_ENTRY(HSI1),
297 	WAKEUP_ENTRY(USB),
298 	WAKEUP_ENTRY(ABB),
299 	WAKEUP_ENTRY(ABB_FIFO),
300 	WAKEUP_ENTRY(ARM)
301 };
302 
303 /*
304  * mb0_transfer - state needed for mailbox 0 communication.
305  * @lock:		The transaction lock.
306  * @dbb_irqs_lock:	A lock used to handle concurrent access to (parts of)
307  *			the request data.
308  * @mask_work:		Work structure used for (un)masking wakeup interrupts.
309  * @req:		Request data that need to persist between requests.
310  */
311 static struct {
312 	spinlock_t lock;
313 	spinlock_t dbb_irqs_lock;
314 	struct work_struct mask_work;
315 	struct mutex ac_wake_lock;
316 	struct completion ac_wake_work;
317 	struct {
318 		u32 dbb_irqs;
319 		u32 dbb_wakeups;
320 		u32 abb_events;
321 	} req;
322 } mb0_transfer;
323 
324 /*
325  * mb1_transfer - state needed for mailbox 1 communication.
326  * @lock:	The transaction lock.
327  * @work:	The transaction completion structure.
328  * @ack:	Reply ("acknowledge") data.
329  */
330 static struct {
331 	struct mutex lock;
332 	struct completion work;
333 	struct {
334 		u8 header;
335 		u8 arm_opp;
336 		u8 ape_opp;
337 		u8 ape_voltage_status;
338 	} ack;
339 } mb1_transfer;
340 
341 /*
342  * mb2_transfer - state needed for mailbox 2 communication.
343  * @lock:            The transaction lock.
344  * @work:            The transaction completion structure.
345  * @auto_pm_lock:    The autonomous power management configuration lock.
346  * @auto_pm_enabled: A flag indicating whether autonomous PM is enabled.
347  * @req:             Request data that need to persist between requests.
348  * @ack:             Reply ("acknowledge") data.
349  */
350 static struct {
351 	struct mutex lock;
352 	struct completion work;
353 	spinlock_t auto_pm_lock;
354 	bool auto_pm_enabled;
355 	struct {
356 		u8 status;
357 	} ack;
358 } mb2_transfer;
359 
360 /*
361  * mb3_transfer - state needed for mailbox 3 communication.
362  * @lock:		The request lock.
363  * @sysclk_lock:	A lock used to handle concurrent sysclk requests.
364  * @sysclk_work:	Work structure used for sysclk requests.
365  */
366 static struct {
367 	spinlock_t lock;
368 	struct mutex sysclk_lock;
369 	struct completion sysclk_work;
370 } mb3_transfer;
371 
372 /*
373  * mb4_transfer - state needed for mailbox 4 communication.
374  * @lock:	The transaction lock.
375  * @work:	The transaction completion structure.
376  */
377 static struct {
378 	struct mutex lock;
379 	struct completion work;
380 } mb4_transfer;
381 
382 /*
383  * mb5_transfer - state needed for mailbox 5 communication.
384  * @lock:	The transaction lock.
385  * @work:	The transaction completion structure.
386  * @ack:	Reply ("acknowledge") data.
387  */
388 static struct {
389 	struct mutex lock;
390 	struct completion work;
391 	struct {
392 		u8 status;
393 		u8 value;
394 	} ack;
395 } mb5_transfer;
396 
397 static atomic_t ac_wake_req_state = ATOMIC_INIT(0);
398 
399 /* Spinlocks */
400 static DEFINE_SPINLOCK(clkout_lock);
401 static DEFINE_SPINLOCK(gpiocr_lock);
402 
403 /* Global variable used to determine the TCDM base at runtime (v1 or v2) */
404 static __iomem void *tcdm_base;
405 
406 struct clk_mgt {
407 	unsigned int offset;
408 	u32 pllsw;
409 };
410 
411 static DEFINE_SPINLOCK(clk_mgt_lock);
412 
413 #define CLK_MGT_ENTRY(_name)[PRCMU_##_name] = { (PRCM_##_name##_MGT), 0 }
414 struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
415 	CLK_MGT_ENTRY(SGACLK),
416 	CLK_MGT_ENTRY(UARTCLK),
417 	CLK_MGT_ENTRY(MSP02CLK),
418 	CLK_MGT_ENTRY(MSP1CLK),
419 	CLK_MGT_ENTRY(I2CCLK),
420 	CLK_MGT_ENTRY(SDMMCCLK),
421 	CLK_MGT_ENTRY(SLIMCLK),
422 	CLK_MGT_ENTRY(PER1CLK),
423 	CLK_MGT_ENTRY(PER2CLK),
424 	CLK_MGT_ENTRY(PER3CLK),
425 	CLK_MGT_ENTRY(PER5CLK),
426 	CLK_MGT_ENTRY(PER6CLK),
427 	CLK_MGT_ENTRY(PER7CLK),
428 	CLK_MGT_ENTRY(LCDCLK),
429 	CLK_MGT_ENTRY(BMLCLK),
430 	CLK_MGT_ENTRY(HSITXCLK),
431 	CLK_MGT_ENTRY(HSIRXCLK),
432 	CLK_MGT_ENTRY(HDMICLK),
433 	CLK_MGT_ENTRY(APEATCLK),
434 	CLK_MGT_ENTRY(APETRACECLK),
435 	CLK_MGT_ENTRY(MCDECLK),
436 	CLK_MGT_ENTRY(IPI2CCLK),
437 	CLK_MGT_ENTRY(DSIALTCLK),
438 	CLK_MGT_ENTRY(DMACLK),
439 	CLK_MGT_ENTRY(B2R2CLK),
440 	CLK_MGT_ENTRY(TVCLK),
441 	CLK_MGT_ENTRY(SSPCLK),
442 	CLK_MGT_ENTRY(RNGCLK),
443 	CLK_MGT_ENTRY(UICCCLK),
444 };
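
/*
 * For illustration: CLK_MGT_ENTRY(SGACLK) above expands to
 *
 *	[PRCMU_SGACLK] = { PRCM_SGACLK_MGT, 0 }
 *
 * i.e. each entry records the clock's PRCM_*_MGT register offset plus a
 * saved PLLSW value, which request_reg_clock() fills in when the clock is
 * disabled and restores when it is re-enabled.
 */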
445 
446 /*
447  * Used by MCDE to set up all necessary PRCMU registers
448  */
449 #define PRCMU_RESET_DSIPLL		0x00004000
450 #define PRCMU_UNCLAMP_DSIPLL		0x00400800
451 
452 #define PRCMU_CLK_PLL_DIV_SHIFT		0
453 #define PRCMU_CLK_PLL_SW_SHIFT		5
454 #define PRCMU_CLK_38			(1 << 9)
455 #define PRCMU_CLK_38_SRC		(1 << 10)
456 #define PRCMU_CLK_38_DIV		(1 << 11)
457 
458 /* PLLDIV=12, PLLSW=4 (PLLDDR) */
459 #define PRCMU_DSI_CLOCK_SETTING		0x0000008C
460 
461 /* PLLDIV=8, PLLSW=4 (PLLDDR) */
462 #define PRCMU_DSI_CLOCK_SETTING_U8400	0x00000088
463 
464 /* DPI 50000000 Hz */
465 #define PRCMU_DPI_CLOCK_SETTING		((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
466 					  (16 << PRCMU_CLK_PLL_DIV_SHIFT))
467 #define PRCMU_DSI_LP_CLOCK_SETTING	0x00000E00
468 
469 /* D=101, N=1, R=4, SELDIV2=0 */
470 #define PRCMU_PLLDSI_FREQ_SETTING	0x00040165
471 
472 /* D=70, N=1, R=3, SELDIV2=0 */
473 #define PRCMU_PLLDSI_FREQ_SETTING_U8400	0x00030146
474 
475 #define PRCMU_ENABLE_PLLDSI		0x00000001
476 #define PRCMU_DISABLE_PLLDSI		0x00000000
477 #define PRCMU_RELEASE_RESET_DSS		0x0000400C
478 #define PRCMU_DSI_PLLOUT_SEL_SETTING	0x00000202
479 /* ESC clk, div0=1, div1=1, div2=3 */
480 #define PRCMU_ENABLE_ESCAPE_CLOCK_DIV	0x07030101
481 #define PRCMU_DISABLE_ESCAPE_CLOCK_DIV	0x00030101
482 #define PRCMU_DSI_RESET_SW		0x00000007
483 
484 #define PRCMU_PLLDSI_LOCKP_LOCKED	0x3
485 
486 static struct {
487 	u8 project_number;
488 	u8 api_version;
489 	u8 func_version;
490 	u8 errata;
491 } prcmu_version;
492 
493 
494 int prcmu_enable_dsipll(void)
495 {
496 	int i;
497 	unsigned int plldsifreq;
498 
499 	/* Clear DSIPLL_RESETN */
500 	writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_CLR));
501 	/* Unclamp DSIPLL in/out */
502 	writel(PRCMU_UNCLAMP_DSIPLL, (_PRCMU_BASE + PRCM_MMIP_LS_CLAMP_CLR));
503 
504 	if (prcmu_is_u8400())
505 		plldsifreq = PRCMU_PLLDSI_FREQ_SETTING_U8400;
506 	else
507 		plldsifreq = PRCMU_PLLDSI_FREQ_SETTING;
508 	/* Set DSI PLL FREQ */
509 	writel(plldsifreq, (_PRCMU_BASE + PRCM_PLLDSI_FREQ));
510 	writel(PRCMU_DSI_PLLOUT_SEL_SETTING,
511 		(_PRCMU_BASE + PRCM_DSI_PLLOUT_SEL));
512 	/* Enable Escape clocks */
513 	writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV,
514 					(_PRCMU_BASE + PRCM_DSITVCLK_DIV));
515 
516 	/* Start DSI PLL */
517 	writel(PRCMU_ENABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE));
518 	/* Reset DSI PLL */
519 	writel(PRCMU_DSI_RESET_SW, (_PRCMU_BASE + PRCM_DSI_SW_RESET));
520 	for (i = 0; i < 10; i++) {
521 		if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) &
522 			PRCMU_PLLDSI_LOCKP_LOCKED)
523 					== PRCMU_PLLDSI_LOCKP_LOCKED)
524 			break;
525 		udelay(100);
526 	}
527 	/* Set DSIPLL_RESETN */
528 	writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_SET));
529 	return 0;
530 }
531 
532 int prcmu_disable_dsipll(void)
533 {
534 	/* Disable dsi pll */
535 	writel(PRCMU_DISABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE));
536 	/* Disable escape clock */
537 	writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV,
538 					(_PRCMU_BASE + PRCM_DSITVCLK_DIV));
539 	return 0;
540 }
541 
542 int prcmu_set_display_clocks(void)
543 {
544 	unsigned long flags;
545 	unsigned int dsiclk;
546 
547 	if (prcmu_is_u8400())
548 		dsiclk = PRCMU_DSI_CLOCK_SETTING_U8400;
549 	else
550 		dsiclk = PRCMU_DSI_CLOCK_SETTING;
551 
552 	spin_lock_irqsave(&clk_mgt_lock, flags);
553 
554 	/* Grab the HW semaphore. */
555 	while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
556 		cpu_relax();
557 
558 	writel(dsiclk, (_PRCMU_BASE + PRCM_HDMICLK_MGT));
559 	writel(PRCMU_DSI_LP_CLOCK_SETTING, (_PRCMU_BASE + PRCM_TVCLK_MGT));
560 	writel(PRCMU_DPI_CLOCK_SETTING, (_PRCMU_BASE + PRCM_LCDCLK_MGT));
561 
562 	/* Release the HW semaphore. */
563 	writel(0, (_PRCMU_BASE + PRCM_SEM));
564 
565 	spin_unlock_irqrestore(&clk_mgt_lock, flags);
566 
567 	return 0;
568 }
569 
570 /**
571  * prcmu_enable_spi2 - Enables pin muxing for SPI2 on OtherAlternateC1.
572  */
573 void prcmu_enable_spi2(void)
574 {
575 	u32 reg;
576 	unsigned long flags;
577 
578 	spin_lock_irqsave(&gpiocr_lock, flags);
579 	reg = readl(_PRCMU_BASE + PRCM_GPIOCR);
580 	writel(reg | PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR);
581 	spin_unlock_irqrestore(&gpiocr_lock, flags);
582 }
583 
584 /**
585  * prcmu_disable_spi2 - Disables pin muxing for SPI2 on OtherAlternateC1.
586  */
587 void prcmu_disable_spi2(void)
588 {
589 	u32 reg;
590 	unsigned long flags;
591 
592 	spin_lock_irqsave(&gpiocr_lock, flags);
593 	reg = readl(_PRCMU_BASE + PRCM_GPIOCR);
594 	writel(reg & ~PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR);
595 	spin_unlock_irqrestore(&gpiocr_lock, flags);
596 }
597 
598 bool prcmu_has_arm_maxopp(void)
599 {
600 	return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
601 		PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
602 }
603 
604 bool prcmu_is_u8400(void)
605 {
606 	return prcmu_version.project_number == PRCMU_PROJECT_ID_8400V2_0;
607 }
608 
609 /**
610  * prcmu_get_boot_status - PRCMU boot status checking
611  * Returns: the current PRCMU boot status
612  */
613 int prcmu_get_boot_status(void)
614 {
615 	return readb(tcdm_base + PRCM_BOOT_STATUS);
616 }
617 
618 /**
619  * prcmu_set_rc_a2p - This function is used to run a few power state sequences
620  * @val: Value to be set, i.e. transition requested
621  * Returns: 0 on success, -EINVAL on invalid argument
622  *
623  * This function is used to run the following power state sequences -
624  * any state to ApReset,  ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
625  */
626 int prcmu_set_rc_a2p(enum romcode_write val)
627 {
628 	if (val < RDY_2_DS || val > RDY_2_XP70_RST)
629 		return -EINVAL;
630 	writeb(val, (tcdm_base + PRCM_ROMCODE_A2P));
631 	return 0;
632 }
633 
634 /**
635  * prcmu_get_rc_p2a - This function is used to get power state sequences
636  * Returns: the power transition that has last happened
637  *
638  * This function can return the following transitions -
639  * any state to ApReset,  ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
640  */
641 enum romcode_read prcmu_get_rc_p2a(void)
642 {
643 	return readb(tcdm_base + PRCM_ROMCODE_P2A);
644 }
645 
646 /**
647  * prcmu_get_xp70_current_state - Return the current XP70 power mode
648  * Returns: the current AP(ARM) power mode: init,
649  * apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
650  */
651 enum ap_pwrst prcmu_get_xp70_current_state(void)
652 {
653 	return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE);
654 }
655 
656 /**
657  * prcmu_config_clkout - Configure one of the programmable clock outputs.
658  * @clkout:	The CLKOUT number (0 or 1).
659  * @source:	The clock to be used (one of the PRCMU_CLKSRC_*).
660  * @div:	The divider to be applied.
661  *
662  * Configures one of the programmable clock outputs (CLKOUTs).
663  * @div should be in the range [1,63] to request a configuration, or 0 to
664  * inform that the configuration is no longer requested.
665  */
666 int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
667 {
668 	static int requests[2];
669 	int r = 0;
670 	unsigned long flags;
671 	u32 val;
672 	u32 bits;
673 	u32 mask;
674 	u32 div_mask;
675 
676 	BUG_ON(clkout > 1);
677 	BUG_ON(div > 63);
678 	BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009));
679 
680 	if (!div && !requests[clkout])
681 		return -EINVAL;
682 
683 	switch (clkout) {
684 	case 0:
685 		div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
686 		mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
687 		bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
688 			(div << PRCM_CLKOCR_CLKODIV0_SHIFT));
689 		break;
690 	case 1:
691 		div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
692 		mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
693 			PRCM_CLKOCR_CLK1TYPE);
694 		bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
695 			(div << PRCM_CLKOCR_CLKODIV1_SHIFT));
696 		break;
697 	}
698 	bits &= mask;
699 
700 	spin_lock_irqsave(&clkout_lock, flags);
701 
702 	val = readl(_PRCMU_BASE + PRCM_CLKOCR);
703 	if (val & div_mask) {
704 		if (div) {
705 			if ((val & mask) != bits) {
706 				r = -EBUSY;
707 				goto unlock_and_return;
708 			}
709 		} else {
710 			if ((val & mask & ~div_mask) != bits) {
711 				r = -EINVAL;
712 				goto unlock_and_return;
713 			}
714 		}
715 	}
716 	writel((bits | (val & ~mask)), (_PRCMU_BASE + PRCM_CLKOCR));
717 	requests[clkout] += (div ? 1 : -1);
718 
719 unlock_and_return:
720 	spin_unlock_irqrestore(&clkout_lock, flags);
721 
722 	return r;
723 }
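
/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	r = prcmu_config_clkout(0, PRCMU_CLKSRC_CLK009, 8);    request
 *	...
 *	r = prcmu_config_clkout(0, PRCMU_CLKSRC_CLK009, 0);    release
 *
 * Requests are counted per CLKOUT; a request that conflicts with the
 * currently programmed source/divider fails with -EBUSY.
 */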
724 
725 int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
726 {
727 	unsigned long flags;
728 
729 	BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state));
730 
731 	spin_lock_irqsave(&mb0_transfer.lock, flags);
732 
733 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
734 		cpu_relax();
735 
736 	writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
737 	writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE));
738 	writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE));
739 	writeb((keep_ulp_clk ? 1 : 0),
740 		(tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE));
741 	writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI));
742 	writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
743 
744 	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
745 
746 	return 0;
747 }
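
/*
 * Usage sketch (for illustration only), e.g. from suspend or cpuidle code:
 *
 *	prcmu_set_power_state(PRCMU_AP_SLEEP, true, true);
 *
 * The caller is expected to execute WFI afterwards; PRCM_REQ_MB0_DO_NOT_WFI
 * is written as 0 above, so the firmware assumes the ARM will enter WFI.
 */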
748 
749 /* This function should only be called while mb0_transfer.lock is held. */
750 static void config_wakeups(void)
751 {
752 	const u8 header[2] = {
753 		MB0H_CONFIG_WAKEUPS_EXE,
754 		MB0H_CONFIG_WAKEUPS_SLEEP
755 	};
756 	static u32 last_dbb_events;
757 	static u32 last_abb_events;
758 	u32 dbb_events;
759 	u32 abb_events;
760 	unsigned int i;
761 
762 	dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
763 	dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK);
764 
765 	abb_events = mb0_transfer.req.abb_events;
766 
767 	if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
768 		return;
769 
770 	for (i = 0; i < 2; i++) {
771 		while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
772 			cpu_relax();
773 		writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500));
774 		writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500));
775 		writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
776 		writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
777 	}
778 	last_dbb_events = dbb_events;
779 	last_abb_events = abb_events;
780 }
781 
782 void prcmu_enable_wakeups(u32 wakeups)
783 {
784 	unsigned long flags;
785 	u32 bits;
786 	int i;
787 
788 	BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));
789 
790 	for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
791 		if (wakeups & BIT(i))
792 			bits |= prcmu_wakeup_bit[i];
793 	}
794 
795 	spin_lock_irqsave(&mb0_transfer.lock, flags);
796 
797 	mb0_transfer.req.dbb_wakeups = bits;
798 	config_wakeups();
799 
800 	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
801 }
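
/*
 * Usage sketch (for illustration only): enable the RTC and ABB wakeups, and
 * later turn all wakeups off again.
 *
 *	prcmu_enable_wakeups(BIT(PRCMU_WAKEUP_INDEX_RTC) |
 *			     BIT(PRCMU_WAKEUP_INDEX_ABB));
 *	...
 *	prcmu_enable_wakeups(0);
 *
 * The argument is a mask of PRCMU_WAKEUP_INDEX_* bits (cf. the
 * prcmu_wakeup_bit[] table above), not of the raw WAKEUP_BIT_* values.
 */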
802 
803 void prcmu_config_abb_event_readout(u32 abb_events)
804 {
805 	unsigned long flags;
806 
807 	spin_lock_irqsave(&mb0_transfer.lock, flags);
808 
809 	mb0_transfer.req.abb_events = abb_events;
810 	config_wakeups();
811 
812 	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
813 }
814 
815 void prcmu_get_abb_event_buffer(void __iomem **buf)
816 {
817 	if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
818 		*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500);
819 	else
820 		*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500);
821 }
822 
823 /**
824  * prcmu_set_arm_opp - set the appropriate ARM OPP
825  * @opp: The new ARM operating point to which transition is to be made
826  * Returns: 0 on success, non-zero on failure
827  *
828  * This function sets the operating point of the ARM.
829  */
830 int prcmu_set_arm_opp(u8 opp)
831 {
832 	int r;
833 
834 	if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK)
835 		return -EINVAL;
836 
837 	r = 0;
838 
839 	mutex_lock(&mb1_transfer.lock);
840 
841 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
842 		cpu_relax();
843 
844 	writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
845 	writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
846 	writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
847 
848 	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
849 	wait_for_completion(&mb1_transfer.work);
850 
851 	if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
852 		(mb1_transfer.ack.arm_opp != opp))
853 		r = -EIO;
854 
855 	mutex_unlock(&mb1_transfer.lock);
856 
857 	return r;
858 }
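
/*
 * Usage sketch (for illustration only; ARM_MAX_OPP/ARM_100_OPP are assumed
 * to be among the OPP values defined in the prcmu header):
 *
 *	if (prcmu_has_arm_maxopp())
 *		r = prcmu_set_arm_opp(ARM_MAX_OPP);
 *	else
 *		r = prcmu_set_arm_opp(ARM_100_OPP);
 */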
859 
860 /**
861  * prcmu_get_arm_opp - get the current ARM OPP
862  *
863  * Returns: the current ARM OPP
864  */
865 int prcmu_get_arm_opp(void)
866 {
867 	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP);
868 }
869 
870 /**
871  * prcmu_get_ddr_opp - get the current DDR OPP
872  *
873  * Returns: the current DDR OPP
874  */
875 int prcmu_get_ddr_opp(void)
876 {
877 	return readb(_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW);
878 }
879 
880 /**
881  * prcmu_set_ddr_opp - set the appropriate DDR OPP
882  * @opp: The new DDR operating point to which transition is to be made
883  * Returns: 0 on success, non-zero on failure
884  *
885  * This function sets the operating point of the DDR.
886  */
887 int prcmu_set_ddr_opp(u8 opp)
888 {
889 	if (opp < DDR_100_OPP || opp > DDR_25_OPP)
890 		return -EINVAL;
891 	/* Changing the DDR OPP can hang the hardware pre-v21 */
892 	if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
893 		writeb(opp, (_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW));
894 
895 	return 0;
896 }
897 /**
898  * prcmu_set_ape_opp - set the appropriate APE OPP
899  * @opp: The new APE operating point to which transition is to be made
900  * Returns: 0 on success, non-zero on failure
901  *
902  * This function sets the operating point of the APE.
903  */
904 int prcmu_set_ape_opp(u8 opp)
905 {
906 	int r = 0;
907 
908 	mutex_lock(&mb1_transfer.lock);
909 
910 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
911 		cpu_relax();
912 
913 	writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
914 	writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
915 	writeb(opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
916 
917 	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
918 	wait_for_completion(&mb1_transfer.work);
919 
920 	if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
921 		(mb1_transfer.ack.ape_opp != opp))
922 		r = -EIO;
923 
924 	mutex_unlock(&mb1_transfer.lock);
925 
926 	return r;
927 }
928 
929 /**
930  * prcmu_get_ape_opp - get the current APE OPP
931  *
932  * Returns: the current APE OPP
933  */
934 int prcmu_get_ape_opp(void)
935 {
936 	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
937 }
938 
939 /**
940  * prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage
941  * @enable: true to request the higher voltage, false to drop a request.
942  *
943  * Calls to this function to enable and disable requests must be balanced.
944  */
945 int prcmu_request_ape_opp_100_voltage(bool enable)
946 {
947 	int r = 0;
948 	u8 header;
949 	static unsigned int requests;
950 
951 	mutex_lock(&mb1_transfer.lock);
952 
953 	if (enable) {
954 		if (0 != requests++)
955 			goto unlock_and_return;
956 		header = MB1H_REQUEST_APE_OPP_100_VOLT;
957 	} else {
958 		if (requests == 0) {
959 			r = -EIO;
960 			goto unlock_and_return;
961 		} else if (1 != requests--) {
962 			goto unlock_and_return;
963 		}
964 		header = MB1H_RELEASE_APE_OPP_100_VOLT;
965 	}
966 
967 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
968 		cpu_relax();
969 
970 	writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
971 
972 	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
973 	wait_for_completion(&mb1_transfer.work);
974 
975 	if ((mb1_transfer.ack.header != header) ||
976 		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
977 		r = -EIO;
978 
979 unlock_and_return:
980 	mutex_unlock(&mb1_transfer.lock);
981 
982 	return r;
983 }
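
/*
 * Usage sketch (for illustration only); enable/disable calls must balance:
 *
 *	r = prcmu_request_ape_opp_100_voltage(true);     take a reference
 *	...
 *	r = prcmu_request_ape_opp_100_voltage(false);    drop it again
 *
 * Only the first enable and the last disable actually send a mailbox 1
 * request; intermediate calls just adjust the reference count.
 */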
984 
985 /**
986  * prcmu_release_usb_wakeup_state - release the state required by a USB wakeup
987  *
988  * This function releases the power state requirements of a USB wakeup.
989  */
990 int prcmu_release_usb_wakeup_state(void)
991 {
992 	int r = 0;
993 
994 	mutex_lock(&mb1_transfer.lock);
995 
996 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
997 		cpu_relax();
998 
999 	writeb(MB1H_RELEASE_USB_WAKEUP,
1000 		(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1001 
1002 	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1003 	wait_for_completion(&mb1_transfer.work);
1004 
1005 	if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
1006 		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
1007 		r = -EIO;
1008 
1009 	mutex_unlock(&mb1_transfer.lock);
1010 
1011 	return r;
1012 }
1013 
1014 /**
1015  * prcmu_set_epod - set the state of an EPOD (power domain)
1016  * @epod_id: The EPOD to set
1017  * @epod_state: The new EPOD state
1018  *
1019  * This function sets the state of an EPOD (power domain). It may not be called
1020  * from interrupt context.
1021  */
1022 int prcmu_set_epod(u16 epod_id, u8 epod_state)
1023 {
1024 	int r = 0;
1025 	bool ram_retention = false;
1026 	int i;
1027 
1028 	/* check argument */
1029 	BUG_ON(epod_id >= NUM_EPOD_ID);
1030 
1031 	/* set flag if retention is possible */
1032 	switch (epod_id) {
1033 	case EPOD_ID_SVAMMDSP:
1034 	case EPOD_ID_SIAMMDSP:
1035 	case EPOD_ID_ESRAM12:
1036 	case EPOD_ID_ESRAM34:
1037 		ram_retention = true;
1038 		break;
1039 	}
1040 
1041 	/* check argument */
1042 	BUG_ON(epod_state > EPOD_STATE_ON);
1043 	BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);
1044 
1045 	/* get lock */
1046 	mutex_lock(&mb2_transfer.lock);
1047 
1048 	/* wait for mailbox */
1049 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
1050 		cpu_relax();
1051 
1052 	/* fill in mailbox */
1053 	for (i = 0; i < NUM_EPOD_ID; i++)
1054 		writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i));
1055 	writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id));
1056 
1057 	writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));
1058 
1059 	writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1060 
1061 	/*
1062 	 * The current firmware version does not handle errors correctly,
1063 	 * and we cannot recover if there is an error.
1064 	 * This is expected to change when the firmware is updated.
1065 	 */
1066 	if (!wait_for_completion_timeout(&mb2_transfer.work,
1067 			msecs_to_jiffies(20000))) {
1068 		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1069 			__func__);
1070 		r = -EIO;
1071 		goto unlock_and_return;
1072 	}
1073 
1074 	if (mb2_transfer.ack.status != HWACC_PWR_ST_OK)
1075 		r = -EIO;
1076 
1077 unlock_and_return:
1078 	mutex_unlock(&mb2_transfer.lock);
1079 	return r;
1080 }
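
/*
 * Usage sketch (for illustration only):
 *
 *	r = prcmu_set_epod(EPOD_ID_SVAMMDSP, EPOD_STATE_RAMRET);
 *
 * RAM retention is only accepted for the RAM-capable domains listed in the
 * switch statement above; other EPODs may only be switched on or off.
 */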
1081 
1082 /**
1083  * prcmu_configure_auto_pm - Configure autonomous power management.
1084  * @sleep: Configuration for ApSleep.
1085  * @idle:  Configuration for ApIdle.
1086  */
1087 void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
1088 	struct prcmu_auto_pm_config *idle)
1089 {
1090 	u32 sleep_cfg;
1091 	u32 idle_cfg;
1092 	unsigned long flags;
1093 
1094 	BUG_ON((sleep == NULL) || (idle == NULL));
1095 
1096 	sleep_cfg = (sleep->sva_auto_pm_enable & 0xF);
1097 	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF));
1098 	sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF));
1099 	sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF));
1100 	sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF));
1101 	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF));
1102 
1103 	idle_cfg = (idle->sva_auto_pm_enable & 0xF);
1104 	idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF));
1105 	idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF));
1106 	idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF));
1107 	idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF));
1108 	idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF));
1109 
1110 	spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags);
1111 
1112 	/*
1113 	 * The autonomous power management configuration is done through
1114 	 * fields in mailbox 2, but these fields are only used as shared
1115 	 * variables - i.e. there is no need to send a message.
1116 	 */
1117 	writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP));
1118 	writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE));
1119 
1120 	mb2_transfer.auto_pm_enabled =
1121 		((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1122 		 (sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1123 		 (idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1124 		 (idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON));
1125 
1126 	spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags);
1127 }
1128 EXPORT_SYMBOL(prcmu_configure_auto_pm);
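
/*
 * Usage sketch (hypothetical field values, for illustration only):
 *
 *	struct prcmu_auto_pm_config cfg = {
 *		.sva_auto_pm_enable = PRCMU_AUTO_PM_ON,
 *		.sia_auto_pm_enable = PRCMU_AUTO_PM_ON,
 *	};
 *
 *	prcmu_configure_auto_pm(&cfg, &cfg);
 *
 * (The remaining power_on/policy fields are left at 0 here purely for
 * brevity; real callers pick values defined by the prcmu header.)
 */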
1129 
1130 bool prcmu_is_auto_pm_enabled(void)
1131 {
1132 	return mb2_transfer.auto_pm_enabled;
1133 }
1134 
1135 static int request_sysclk(bool enable)
1136 {
1137 	int r;
1138 	unsigned long flags;
1139 
1140 	r = 0;
1141 
1142 	mutex_lock(&mb3_transfer.sysclk_lock);
1143 
1144 	spin_lock_irqsave(&mb3_transfer.lock, flags);
1145 
1146 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
1147 		cpu_relax();
1148 
1149 	writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));
1150 
1151 	writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
1152 	writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1153 
1154 	spin_unlock_irqrestore(&mb3_transfer.lock, flags);
1155 
1156 	/*
1157 	 * The firmware only sends an ACK if we want to enable the
1158 	 * SysClk, and it succeeds.
1159 	 */
1160 	if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work,
1161 			msecs_to_jiffies(20000))) {
1162 		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1163 			__func__);
1164 		r = -EIO;
1165 	}
1166 
1167 	mutex_unlock(&mb3_transfer.sysclk_lock);
1168 
1169 	return r;
1170 }
1171 
1172 static int request_timclk(bool enable)
1173 {
1174 	u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
1175 
1176 	if (!enable)
1177 		val |= PRCM_TCR_STOP_TIMERS;
1178 	writel(val, (_PRCMU_BASE + PRCM_TCR));
1179 
1180 	return 0;
1181 }
1182 
1183 static int request_reg_clock(u8 clock, bool enable)
1184 {
1185 	u32 val;
1186 	unsigned long flags;
1187 
1188 	spin_lock_irqsave(&clk_mgt_lock, flags);
1189 
1190 	/* Grab the HW semaphore. */
1191 	while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
1192 		cpu_relax();
1193 
1194 	val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
1195 	if (enable) {
1196 		val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
1197 	} else {
1198 		clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
1199 		val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
1200 	}
1201 	writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
1202 
1203 	/* Release the HW semaphore. */
1204 	writel(0, (_PRCMU_BASE + PRCM_SEM));
1205 
1206 	spin_unlock_irqrestore(&clk_mgt_lock, flags);
1207 
1208 	return 0;
1209 }
1210 
1211 /**
1212  * prcmu_request_clock() - Request for a clock to be enabled or disabled.
1213  * @clock:      The clock for which the request is made.
1214  * @enable:     Whether the clock should be enabled (true) or disabled (false).
1215  *
1216  * This function should only be used by the clock implementation.
1217  * Do not use it from any other place!
1218  */
1219 int prcmu_request_clock(u8 clock, bool enable)
1220 {
1221 	if (clock < PRCMU_NUM_REG_CLOCKS)
1222 		return request_reg_clock(clock, enable);
1223 	else if (clock == PRCMU_TIMCLK)
1224 		return request_timclk(enable);
1225 	else if (clock == PRCMU_SYSCLK)
1226 		return request_sysclk(enable);
1227 	else
1228 		return -EINVAL;
1229 }
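
/*
 * Usage sketch (for illustration only), e.g. from the clock implementation
 * mentioned above:
 *
 *	prcmu_request_clock(PRCMU_UARTCLK, true);     enable
 *	...
 *	prcmu_request_clock(PRCMU_UARTCLK, false);    disable
 */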
1230 
1231 int prcmu_config_esram0_deep_sleep(u8 state)
1232 {
1233 	if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
1234 	    (state < ESRAM0_DEEP_SLEEP_STATE_OFF))
1235 		return -EINVAL;
1236 
1237 	mutex_lock(&mb4_transfer.lock);
1238 
1239 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1240 		cpu_relax();
1241 
1242 	writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1243 	writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON),
1244 	       (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE));
1245 	writeb(DDR_PWR_STATE_ON,
1246 	       (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
1247 	writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));
1248 
1249 	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1250 	wait_for_completion(&mb4_transfer.work);
1251 
1252 	mutex_unlock(&mb4_transfer.lock);
1253 
1254 	return 0;
1255 }
1256 
1257 int prcmu_config_hotdog(u8 threshold)
1258 {
1259 	mutex_lock(&mb4_transfer.lock);
1260 
1261 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1262 		cpu_relax();
1263 
1264 	writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
1265 	writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1266 
1267 	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1268 	wait_for_completion(&mb4_transfer.work);
1269 
1270 	mutex_unlock(&mb4_transfer.lock);
1271 
1272 	return 0;
1273 }
1274 
1275 int prcmu_config_hotmon(u8 low, u8 high)
1276 {
1277 	mutex_lock(&mb4_transfer.lock);
1278 
1279 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1280 		cpu_relax();
1281 
1282 	writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
1283 	writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH));
1284 	writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH),
1285 		(tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
1286 	writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1287 
1288 	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1289 	wait_for_completion(&mb4_transfer.work);
1290 
1291 	mutex_unlock(&mb4_transfer.lock);
1292 
1293 	return 0;
1294 }
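
/*
 * Usage sketch (hypothetical thresholds, for illustration only):
 *
 *	prcmu_config_hotmon(70, 85);
 *
 * Crossing the thresholds is reported through the HOTMON_LOW/HOTMON_HIGH
 * wakeups (IRQ_PRCMU_HOTMON_LOW/HIGH). The unit of the thresholds is
 * defined by the PRCMU firmware (assumed to be degrees Celsius here).
 */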
1295 
1296 static int config_hot_period(u16 val)
1297 {
1298 	mutex_lock(&mb4_transfer.lock);
1299 
1300 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1301 		cpu_relax();
1302 
1303 	writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
1304 	writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1305 
1306 	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1307 	wait_for_completion(&mb4_transfer.work);
1308 
1309 	mutex_unlock(&mb4_transfer.lock);
1310 
1311 	return 0;
1312 }
1313 
1314 int prcmu_start_temp_sense(u16 cycles32k)
1315 {
1316 	if (cycles32k == 0xFFFF)
1317 		return -EINVAL;
1318 
1319 	return config_hot_period(cycles32k);
1320 }
1321 
1322 int prcmu_stop_temp_sense(void)
1323 {
1324 	return config_hot_period(0xFFFF);
1325 }
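
/*
 * Usage sketch (for illustration only): sample the temperature sensor every
 * 250 ms (8192 / 32768 Hz), then stop the measurements again.
 *
 *	prcmu_start_temp_sense(8192);
 *	...
 *	prcmu_stop_temp_sense();
 */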
1326 
1327 /**
1328  * prcmu_set_clock_divider() - Configure the clock divider.
1329  * @clock:	The clock for which the request is made.
1330  * @divider:	The clock divider. (< 32)
1331  *
1332  * This function should only be used by the clock implementation.
1333  * Do not use it from any other place!
1334  */
1335 int prcmu_set_clock_divider(u8 clock, u8 divider)
1336 {
1337 	u32 val;
1338 	unsigned long flags;
1339 
1340 	if ((clock >= PRCMU_NUM_REG_CLOCKS) || (divider < 1) || (31 < divider))
1341 		return -EINVAL;
1342 
1343 	spin_lock_irqsave(&clk_mgt_lock, flags);
1344 
1345 	/* Grab the HW semaphore. */
1346 	while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
1347 		cpu_relax();
1348 
1349 	val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
1350 	val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK);
1351 	val |= (u32)divider;
1352 	writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
1353 
1354 	/* Release the HW semaphore. */
1355 	writel(0, (_PRCMU_BASE + PRCM_SEM));
1356 
1357 	spin_unlock_irqrestore(&clk_mgt_lock, flags);
1358 
1359 	return 0;
1360 }
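
/*
 * Usage sketch (for illustration only):
 *
 *	prcmu_set_clock_divider(PRCMU_HDMICLK, 8);
 *
 * The divider is written into the CLKPLLDIV field of the corresponding
 * PRCM_*_MGT register, i.e. (roughly) the selected PLL output divided by
 * eight in this example.
 */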
1361 
1362 /**
1363  * prcmu_abb_read() - Read register value(s) from the ABB.
1364  * @slave:	The I2C slave address.
1365  * @reg:	The (start) register address.
1366  * @value:	The read out value(s).
1367  * @size:	The number of registers to read.
1368  *
1369  * Reads register value(s) from the ABB.
1370  * @size has to be 1 for the current firmware version.
1371  */
1372 int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
1373 {
1374 	int r;
1375 
1376 	if (size != 1)
1377 		return -EINVAL;
1378 
1379 	mutex_lock(&mb5_transfer.lock);
1380 
1381 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
1382 		cpu_relax();
1383 
1384 	writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
1385 	writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
1386 	writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
1387 	writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
1388 
1389 	writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1390 
1391 	if (!wait_for_completion_timeout(&mb5_transfer.work,
1392 				msecs_to_jiffies(20000))) {
1393 		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1394 			__func__);
1395 		r = -EIO;
1396 	} else {
1397 		r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
1398 	}
1399 
1400 	if (!r)
1401 		*value = mb5_transfer.ack.value;
1402 
1403 	mutex_unlock(&mb5_transfer.lock);
1404 
1405 	return r;
1406 }
1407 
1408 /**
1409  * prcmu_abb_write() - Write register value(s) to the ABB.
1410  * @slave:	The I2C slave address.
1411  * @reg:	The (start) register address.
1412  * @value:	The value(s) to write.
1413  * @size:	The number of registers to write.
1414  *
1415  * Writes register value(s) to the ABB.
1416  * @size has to be 1 for the current firmware version.
1417  */
1418 int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
1419 {
1420 	int r;
1421 
1422 	if (size != 1)
1423 		return -EINVAL;
1424 
1425 	mutex_lock(&mb5_transfer.lock);
1426 
1427 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
1428 		cpu_relax();
1429 
1430 	writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
1431 	writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
1432 	writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
1433 	writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
1434 
1435 	writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1436 
1437 	if (!wait_for_completion_timeout(&mb5_transfer.work,
1438 				msecs_to_jiffies(20000))) {
1439 		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1440 			__func__);
1441 		r = -EIO;
1442 	} else {
1443 		r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
1444 	}
1445 
1446 	mutex_unlock(&mb5_transfer.lock);
1447 
1448 	return r;
1449 }
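
/*
 * Usage sketch (hypothetical slave/register values, for illustration only):
 * a read-modify-write of a single ABB register through the PRCMU I2C master.
 *
 *	u8 val;
 *
 *	r = prcmu_abb_read(0x02, 0x40, &val, 1);
 *	if (!r) {
 *		val |= BIT(0);
 *		r = prcmu_abb_write(0x02, 0x40, &val, 1);
 *	}
 */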
1450 
1451 /**
1452  * prcmu_ac_wake_req - should be called whenever the ARM wants to wake up the modem
1453  */
1454 void prcmu_ac_wake_req(void)
1455 {
1456 	u32 val;
1457 
1458 	mutex_lock(&mb0_transfer.ac_wake_lock);
1459 
1460 	val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
1461 	if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)
1462 		goto unlock_and_return;
1463 
1464 	atomic_set(&ac_wake_req_state, 1);
1465 
1466 	writel((val | PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
1467 		(_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
1468 
1469 	if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
1470 			msecs_to_jiffies(20000))) {
1471 		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1472 			__func__);
1473 	}
1474 
1475 unlock_and_return:
1476 	mutex_unlock(&mb0_transfer.ac_wake_lock);
1477 }
1478 
1479 /**
1480  * prcmu_ac_sleep_req - called when the ARM no longer needs to talk to the modem
1481  */
1482 void prcmu_ac_sleep_req()
1483 {
1484 	u32 val;
1485 
1486 	mutex_lock(&mb0_transfer.ac_wake_lock);
1487 
1488 	val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
1489 	if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ))
1490 		goto unlock_and_return;
1491 
1492 	writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
1493 		(_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
1494 
1495 	if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
1496 			msecs_to_jiffies(20000))) {
1497 		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1498 			__func__);
1499 	}
1500 
1501 	atomic_set(&ac_wake_req_state, 0);
1502 
1503 unlock_and_return:
1504 	mutex_unlock(&mb0_transfer.ac_wake_lock);
1505 }
1506 
1507 bool prcmu_is_ac_wake_requested(void)
1508 {
1509 	return (atomic_read(&ac_wake_req_state) != 0);
1510 }
1511 
1512 /**
1513  * prcmu_system_reset - System reset
1514  *
1515  * Saves the reset reason code and then sets the APE_SOFTRST register, which
1516  * fires an interrupt to the firmware.
1517  */
1518 void prcmu_system_reset(u16 reset_code)
1519 {
1520 	writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON));
1521 	writel(1, (_PRCMU_BASE + PRCM_APE_SOFTRST));
1522 }
1523 
1524 /**
1525  * prcmu_modem_reset - ask the PRCMU to reset the modem
1526  */
1527 void prcmu_modem_reset(void)
1528 {
1529 	mutex_lock(&mb1_transfer.lock);
1530 
1531 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
1532 		cpu_relax();
1533 
1534 	writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1535 	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1536 	wait_for_completion(&mb1_transfer.work);
1537 
1538 	/*
1539 	 * No need to check the reply from the PRCMU: the modem should go into
1540 	 * its reset state, and that state is already managed by the upper layer.
1541 	 */
1542 
1543 	mutex_unlock(&mb1_transfer.lock);
1544 }
1545 
1546 static void ack_dbb_wakeup(void)
1547 {
1548 	unsigned long flags;
1549 
1550 	spin_lock_irqsave(&mb0_transfer.lock, flags);
1551 
1552 	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
1553 		cpu_relax();
1554 
1555 	writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
1556 	writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1557 
1558 	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
1559 }
1560 
1561 static inline void print_unknown_header_warning(u8 n, u8 header)
1562 {
1563 	pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
1564 		header, n);
1565 }
1566 
1567 static bool read_mailbox_0(void)
1568 {
1569 	bool r;
1570 	u32 ev;
1571 	unsigned int n;
1572 	u8 header;
1573 
1574 	header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0);
1575 	switch (header) {
1576 	case MB0H_WAKEUP_EXE:
1577 	case MB0H_WAKEUP_SLEEP:
1578 		if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
1579 			ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500);
1580 		else
1581 			ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500);
1582 
1583 		if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK))
1584 			complete(&mb0_transfer.ac_wake_work);
1585 		if (ev & WAKEUP_BIT_SYSCLK_OK)
1586 			complete(&mb3_transfer.sysclk_work);
1587 
1588 		ev &= mb0_transfer.req.dbb_irqs;
1589 
1590 		for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
1591 			if (ev & prcmu_irq_bit[n])
1592 				generic_handle_irq(IRQ_PRCMU_BASE + n);
1593 		}
1594 		r = true;
1595 		break;
1596 	default:
1597 		print_unknown_header_warning(0, header);
1598 		r = false;
1599 		break;
1600 	}
1601 	writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1602 	return r;
1603 }
1604 
1605 static bool read_mailbox_1(void)
1606 {
1607 	mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1);
1608 	mb1_transfer.ack.arm_opp = readb(tcdm_base +
1609 		PRCM_ACK_MB1_CURRENT_ARM_OPP);
1610 	mb1_transfer.ack.ape_opp = readb(tcdm_base +
1611 		PRCM_ACK_MB1_CURRENT_APE_OPP);
1612 	mb1_transfer.ack.ape_voltage_status = readb(tcdm_base +
1613 		PRCM_ACK_MB1_APE_VOLTAGE_STATUS);
1614 	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1615 	complete(&mb1_transfer.work);
1616 	return false;
1617 }
1618 
1619 static bool read_mailbox_2(void)
1620 {
1621 	mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS);
1622 	writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1623 	complete(&mb2_transfer.work);
1624 	return false;
1625 }
1626 
1627 static bool read_mailbox_3(void)
1628 {
1629 	writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1630 	return false;
1631 }
1632 
1633 static bool read_mailbox_4(void)
1634 {
1635 	u8 header;
1636 	bool do_complete = true;
1637 
1638 	header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4);
1639 	switch (header) {
1640 	case MB4H_MEM_ST:
1641 	case MB4H_HOTDOG:
1642 	case MB4H_HOTMON:
1643 	case MB4H_HOT_PERIOD:
1644 		break;
1645 	default:
1646 		print_unknown_header_warning(4, header);
1647 		do_complete = false;
1648 		break;
1649 	}
1650 
1651 	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1652 
1653 	if (do_complete)
1654 		complete(&mb4_transfer.work);
1655 
1656 	return false;
1657 }
1658 
1659 static bool read_mailbox_5(void)
1660 {
1661 	mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS);
1662 	mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL);
1663 	writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1664 	complete(&mb5_transfer.work);
1665 	return false;
1666 }
1667 
1668 static bool read_mailbox_6(void)
1669 {
1670 	writel(MBOX_BIT(6), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1671 	return false;
1672 }
1673 
1674 static bool read_mailbox_7(void)
1675 {
1676 	writel(MBOX_BIT(7), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1677 	return false;
1678 }
1679 
1680 static bool (* const read_mailbox[NUM_MB])(void) = {
1681 	read_mailbox_0,
1682 	read_mailbox_1,
1683 	read_mailbox_2,
1684 	read_mailbox_3,
1685 	read_mailbox_4,
1686 	read_mailbox_5,
1687 	read_mailbox_6,
1688 	read_mailbox_7
1689 };
1690 
1691 static irqreturn_t prcmu_irq_handler(int irq, void *data)
1692 {
1693 	u32 bits;
1694 	u8 n;
1695 	irqreturn_t r;
1696 
1697 	bits = (readl(_PRCMU_BASE + PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
1698 	if (unlikely(!bits))
1699 		return IRQ_NONE;
1700 
1701 	r = IRQ_HANDLED;
1702 	for (n = 0; bits; n++) {
1703 		if (bits & MBOX_BIT(n)) {
1704 			bits -= MBOX_BIT(n);
1705 			if (read_mailbox[n]())
1706 				r = IRQ_WAKE_THREAD;
1707 		}
1708 	}
1709 	return r;
1710 }
1711 
1712 static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
1713 {
1714 	ack_dbb_wakeup();
1715 	return IRQ_HANDLED;
1716 }
1717 
1718 static void prcmu_mask_work(struct work_struct *work)
1719 {
1720 	unsigned long flags;
1721 
1722 	spin_lock_irqsave(&mb0_transfer.lock, flags);
1723 
1724 	config_wakeups();
1725 
1726 	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
1727 }
1728 
1729 static void prcmu_irq_mask(struct irq_data *d)
1730 {
1731 	unsigned long flags;
1732 
1733 	spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
1734 
1735 	mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];
1736 
1737 	spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
1738 
1739 	if (d->irq != IRQ_PRCMU_CA_SLEEP)
1740 		schedule_work(&mb0_transfer.mask_work);
1741 }
1742 
1743 static void prcmu_irq_unmask(struct irq_data *d)
1744 {
1745 	unsigned long flags;
1746 
1747 	spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
1748 
1749 	mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];
1750 
1751 	spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
1752 
1753 	if (d->irq != IRQ_PRCMU_CA_SLEEP)
1754 		schedule_work(&mb0_transfer.mask_work);
1755 }
1756 
1757 static void noop(struct irq_data *d)
1758 {
1759 }
1760 
1761 static struct irq_chip prcmu_irq_chip = {
1762 	.name		= "prcmu",
1763 	.irq_disable	= prcmu_irq_mask,
1764 	.irq_ack	= noop,
1765 	.irq_mask	= prcmu_irq_mask,
1766 	.irq_unmask	= prcmu_irq_unmask,
1767 };
1768 
1769 void __init prcmu_early_init(void)
1770 {
1771 	unsigned int i;
1772 
1773 	if (cpu_is_u8500v1()) {
1774 		tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
1775 	} else if (cpu_is_u8500v2()) {
1776 		void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
1777 
1778 		if (tcpm_base != NULL) {
1779 			int version;
1780 			version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
1781 			prcmu_version.project_number = version & 0xFF;
1782 			prcmu_version.api_version = (version >> 8) & 0xFF;
1783 			prcmu_version.func_version = (version >> 16) & 0xFF;
1784 			prcmu_version.errata = (version >> 24) & 0xFF;
1785 			pr_info("PRCMU firmware version %d.%d.%d\n",
1786 				(version >> 8) & 0xFF, (version >> 16) & 0xFF,
1787 				(version >> 24) & 0xFF);
1788 			iounmap(tcpm_base);
1789 		}
1790 
1791 		tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
1792 	} else {
1793 		pr_err("prcmu: Unsupported chip version\n");
1794 		BUG();
1795 	}
1796 
1797 	spin_lock_init(&mb0_transfer.lock);
1798 	spin_lock_init(&mb0_transfer.dbb_irqs_lock);
1799 	mutex_init(&mb0_transfer.ac_wake_lock);
1800 	init_completion(&mb0_transfer.ac_wake_work);
1801 	mutex_init(&mb1_transfer.lock);
1802 	init_completion(&mb1_transfer.work);
1803 	mutex_init(&mb2_transfer.lock);
1804 	init_completion(&mb2_transfer.work);
1805 	spin_lock_init(&mb2_transfer.auto_pm_lock);
1806 	spin_lock_init(&mb3_transfer.lock);
1807 	mutex_init(&mb3_transfer.sysclk_lock);
1808 	init_completion(&mb3_transfer.sysclk_work);
1809 	mutex_init(&mb4_transfer.lock);
1810 	init_completion(&mb4_transfer.work);
1811 	mutex_init(&mb5_transfer.lock);
1812 	init_completion(&mb5_transfer.work);
1813 
1814 	INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
1815 
1816 	/* Initialize irqs. */
1817 	for (i = 0; i < NUM_PRCMU_WAKEUPS; i++) {
1818 		unsigned int irq;
1819 
1820 		irq = IRQ_PRCMU_BASE + i;
1821 		irq_set_chip_and_handler(irq, &prcmu_irq_chip,
1822 					 handle_simple_irq);
1823 		set_irq_flags(irq, IRQF_VALID);
1824 	}
1825 }
1826 
1827 static struct mfd_cell db8500_prcmu_devs[] = {
1828 	{
1829 		.name = "db8500-prcmu-regulators",
1830 	},
1831 	{
1832 		.name = "cpufreq-u8500",
1833 	},
1834 };
1835 
1836 /**
1837  * db8500_prcmu_probe - probe function implementing the Linux PRCMU fw init logic
1838  *
1839  */
1840 static int __init db8500_prcmu_probe(struct platform_device *pdev)
1841 {
1842 	int err = 0;
1843 
1844 	if (ux500_is_svp())
1845 		return -ENODEV;
1846 
1847 	/* Clean up the mailbox interrupts after pre-kernel code. */
1848 	writel(ALL_MBOX_BITS, (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1849 
1850 	err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
1851 		prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
1852 	if (err < 0) {
1853 		pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
1854 		err = -EBUSY;
1855 		goto no_irq_return;
1856 	}
1857 
1858 	if (cpu_is_u8500v20_or_later())
1859 		prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
1860 
1861 	err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
1862 			      ARRAY_SIZE(db8500_prcmu_devs), NULL,
1863 			      0);
1864 
1865 	if (err)
1866 		pr_err("prcmu: Failed to add subdevices\n");
1867 	else
1868 		pr_info("DB8500 PRCMU initialized\n");
1869 
1870 no_irq_return:
1871 	return err;
1872 }
1873 
1874 static struct platform_driver db8500_prcmu_driver = {
1875 	.driver = {
1876 		.name = "db8500-prcmu",
1877 		.owner = THIS_MODULE,
1878 	},
1879 };
1880 
1881 static int __init db8500_prcmu_init(void)
1882 {
1883 	return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe);
1884 }
1885 
1886 arch_initcall(db8500_prcmu_init);
1887 
1888 MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>");
1889 MODULE_DESCRIPTION("DB8500 PRCM Unit driver");
1890 MODULE_LICENSE("GPL v2");
1891