xref: /openbmc/linux/drivers/iio/adc/xilinx-ams.c (revision 8e74a48d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx AMS driver
4  *
5  *  Copyright (C) 2021 Xilinx, Inc.
6  *
7  *  Manish Narani <mnarani@xilinx.com>
8  *  Rajnikant Bhojani <rajnikant.bhojani@xilinx.com>
9  */
10 
11 #include <linux/bits.h>
12 #include <linux/bitfield.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/devm-helpers.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/iopoll.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mod_devicetable.h>
22 #include <linux/overflow.h>
23 #include <linux/platform_device.h>
24 #include <linux/property.h>
25 #include <linux/slab.h>
26 
27 #include <linux/iio/events.h>
28 #include <linux/iio/iio.h>
29 
30 /* AMS registers definitions */
31 #define AMS_ISR_0			0x010
32 #define AMS_ISR_1			0x014
33 #define AMS_IER_0			0x020
34 #define AMS_IER_1			0x024
35 #define AMS_IDR_0			0x028
36 #define AMS_IDR_1			0x02C
37 #define AMS_PS_CSTS			0x040
38 #define AMS_PL_CSTS			0x044
39 
40 #define AMS_VCC_PSPLL0			0x060
41 #define AMS_VCC_PSPLL3			0x06C
42 #define AMS_VCCINT			0x078
43 #define AMS_VCCBRAM			0x07C
44 #define AMS_VCCAUX			0x080
45 #define AMS_PSDDRPLL			0x084
46 #define AMS_PSINTFPDDR			0x09C
47 
48 #define AMS_VCC_PSPLL0_CH		48
49 #define AMS_VCC_PSPLL3_CH		51
50 #define AMS_VCCINT_CH			54
51 #define AMS_VCCBRAM_CH			55
52 #define AMS_VCCAUX_CH			56
53 #define AMS_PSDDRPLL_CH			57
54 #define AMS_PSINTFPDDR_CH		63
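
/*
 * The channel numbers above are the sequencer channel numbers written into
 * AMS_REG_CONFIG0 (AMS_CONF0_CHANNEL_NUM_MASK) by ams_enable_single_channel()
 * when one of the corresponding control registers is read on demand
 * (see ams_read_vcc_reg()).
 */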
55 
56 #define AMS_REG_CONFIG0			0x100
57 #define AMS_REG_CONFIG1			0x104
58 #define AMS_REG_CONFIG3			0x10C
59 #define AMS_REG_CONFIG4			0x110
60 #define AMS_REG_SEQ_CH0			0x120
61 #define AMS_REG_SEQ_CH1			0x124
62 #define AMS_REG_SEQ_CH2			0x118
63 
64 #define AMS_VUSER0_MASK			BIT(0)
65 #define AMS_VUSER1_MASK			BIT(1)
66 #define AMS_VUSER2_MASK			BIT(2)
67 #define AMS_VUSER3_MASK			BIT(3)
68 
69 #define AMS_TEMP			0x000
70 #define AMS_SUPPLY1			0x004
71 #define AMS_SUPPLY2			0x008
72 #define AMS_VP_VN			0x00C
73 #define AMS_VREFP			0x010
74 #define AMS_VREFN			0x014
75 #define AMS_SUPPLY3			0x018
76 #define AMS_SUPPLY4			0x034
77 #define AMS_SUPPLY5			0x038
78 #define AMS_SUPPLY6			0x03C
79 #define AMS_SUPPLY7			0x200
80 #define AMS_SUPPLY8			0x204
81 #define AMS_SUPPLY9			0x208
82 #define AMS_SUPPLY10			0x20C
83 #define AMS_VCCAMS			0x210
84 #define AMS_TEMP_REMOTE			0x214
85 
86 #define AMS_REG_VAUX(x)			(0x40 + 4 * (x))
87 
88 #define AMS_PS_RESET_VALUE		0xFFFF
89 #define AMS_PL_RESET_VALUE		0xFFFF
90 
91 #define AMS_CONF0_CHANNEL_NUM_MASK	GENMASK(6, 0)
92 
93 #define AMS_CONF1_SEQ_MASK		GENMASK(15, 12)
94 #define AMS_CONF1_SEQ_DEFAULT		FIELD_PREP(AMS_CONF1_SEQ_MASK, 0)
95 #define AMS_CONF1_SEQ_CONTINUOUS	FIELD_PREP(AMS_CONF1_SEQ_MASK, 1)
96 #define AMS_CONF1_SEQ_SINGLE_CHANNEL	FIELD_PREP(AMS_CONF1_SEQ_MASK, 2)
97 
98 #define AMS_REG_SEQ0_MASK		GENMASK(15, 0)
99 #define AMS_REG_SEQ2_MASK		GENMASK(21, 16)
100 #define AMS_REG_SEQ1_MASK		GENMASK_ULL(37, 22)
101 
102 #define AMS_PS_SEQ_MASK			GENMASK(21, 0)
103 #define AMS_PL_SEQ_MASK			GENMASK_ULL(59, 22)
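
/*
 * How these sequencer masks are used (see ams_enable_channel_sequence()): the
 * driver builds a single 64-bit scan mask in which bits [21:0] are PS channels
 * and bits [59:22] are PL channels. Per module, bits [15:0] of the (shifted)
 * mask are written to AMS_REG_SEQ_CH0, bits [21:16] to AMS_REG_SEQ_CH2 and,
 * for the PL module only, bits [37:22] (the VAUX inputs) to AMS_REG_SEQ_CH1.
 * E.g. the PS temperature channel (scan index 8) ends up as bit 8 of the PS
 * module's AMS_REG_SEQ_CH0.
 */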
104 
105 #define AMS_ALARM_TEMP			0x140
106 #define AMS_ALARM_SUPPLY1		0x144
107 #define AMS_ALARM_SUPPLY2		0x148
108 #define AMS_ALARM_SUPPLY3		0x160
109 #define AMS_ALARM_SUPPLY4		0x164
110 #define AMS_ALARM_SUPPLY5		0x168
111 #define AMS_ALARM_SUPPLY6		0x16C
112 #define AMS_ALARM_SUPPLY7		0x180
113 #define AMS_ALARM_SUPPLY8		0x184
114 #define AMS_ALARM_SUPPLY9		0x188
115 #define AMS_ALARM_SUPPLY10		0x18C
116 #define AMS_ALARM_VCCAMS		0x190
117 #define AMS_ALARM_TEMP_REMOTE		0x194
118 #define AMS_ALARM_THRESHOLD_OFF_10	0x10
119 #define AMS_ALARM_THRESHOLD_OFF_20	0x20
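
/*
 * Each alarm has its upper (rising) threshold register at the offsets above;
 * ams_get_alarm_offset() places the matching lower (falling) threshold
 * AMS_ALARM_THRESHOLD_OFF_10 above it for the TEMP .. SUPPLY6 group and
 * AMS_ALARM_THRESHOLD_OFF_20 above it for SUPPLY7 .. TEMP_REMOTE. For
 * example, the SUPPLY1 falling threshold is written at 0x144 + 0x10 = 0x154.
 */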
120 
121 #define AMS_ALARM_THR_DIRECT_MASK	BIT(1)
122 #define AMS_ALARM_THR_MIN		0x0000
123 #define AMS_ALARM_THR_MAX		(BIT(16) - 1)
124 
125 #define AMS_ALARM_MASK			GENMASK_ULL(63, 0)
126 #define AMS_NO_OF_ALARMS		32
127 #define AMS_PL_ALARM_START		16
128 #define AMS_PL_ALARM_MASK		GENMASK(31, 16)
129 #define AMS_ISR0_ALARM_MASK		GENMASK(31, 0)
130 #define AMS_ISR1_ALARM_MASK		(GENMASK(31, 29) | GENMASK(4, 0))
131 #define AMS_ISR1_EOC_MASK		BIT(3)
132 #define AMS_ISR1_INTR_MASK		GENMASK_ULL(63, 32)
133 #define AMS_ISR0_ALARM_2_TO_0_MASK	GENMASK(2, 0)
134 #define AMS_ISR0_ALARM_6_TO_3_MASK	GENMASK(6, 3)
135 #define AMS_ISR0_ALARM_12_TO_7_MASK	GENMASK(13, 8)
136 #define AMS_CONF1_ALARM_2_TO_0_MASK	GENMASK(3, 1)
137 #define AMS_CONF1_ALARM_6_TO_3_MASK	GENMASK(11, 8)
138 #define AMS_CONF1_ALARM_12_TO_7_MASK	GENMASK(5, 0)
139 #define AMS_REGCFG1_ALARM_MASK  \
140 	(AMS_CONF1_ALARM_2_TO_0_MASK | AMS_CONF1_ALARM_6_TO_3_MASK | BIT(0))
141 #define AMS_REGCFG3_ALARM_MASK		AMS_CONF1_ALARM_12_TO_7_MASK
142 
143 #define AMS_PS_CSTS_PS_READY		(BIT(27) | BIT(16))
144 #define AMS_PL_CSTS_ACCESS_MASK		BIT(1)
145 
146 #define AMS_PL_MAX_FIXED_CHANNEL	10
147 #define AMS_PL_MAX_EXT_CHANNEL		20
148 
149 #define AMS_INIT_POLL_TIME_US		200
150 #define AMS_INIT_TIMEOUT_US		10000
151 #define AMS_UNMASK_TIMEOUT_MS		500
152 
153 /*
154  * The following scale and offset values are derived from
155  * UG580 (v1.7), December 20, 2016.
156  */
157 #define AMS_SUPPLY_SCALE_1VOLT_mV		1000
158 #define AMS_SUPPLY_SCALE_3VOLT_mV		3000
159 #define AMS_SUPPLY_SCALE_6VOLT_mV		6000
160 #define AMS_SUPPLY_SCALE_DIV_BIT	16
161 
162 #define AMS_TEMP_SCALE			509314
163 #define AMS_TEMP_SCALE_DIV_BIT		16
164 #define AMS_TEMP_OFFSET			-((280230LL << 16) / 509314)
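
/*
 * Worked example of the resulting conversions (values rounded): with
 * IIO_VAL_FRACTIONAL_LOG2 the reported scale is <scale> / 2^<shift>, so
 * userspace computes
 *
 *   voltage_mV     = raw * scale_mV / 2^16
 *   temperature_mC = (raw + AMS_TEMP_OFFSET) * AMS_TEMP_SCALE / 2^16
 *
 * e.g. a raw reading of 21840 on a 3 V full-scale supply channel is
 * 21840 * 3000 / 65536 ~= 1000 mV, and a raw temperature reading of 41849 is
 * (41849 - 36058) * 509314 / 65536 ~= 45005 milli-degrees Celsius (about 45 C).
 */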
165 
166 enum ams_alarm_bit {
167 	AMS_ALARM_BIT_TEMP = 0,
168 	AMS_ALARM_BIT_SUPPLY1 = 1,
169 	AMS_ALARM_BIT_SUPPLY2 = 2,
170 	AMS_ALARM_BIT_SUPPLY3 = 3,
171 	AMS_ALARM_BIT_SUPPLY4 = 4,
172 	AMS_ALARM_BIT_SUPPLY5 = 5,
173 	AMS_ALARM_BIT_SUPPLY6 = 6,
174 	AMS_ALARM_BIT_RESERVED = 7,
175 	AMS_ALARM_BIT_SUPPLY7 = 8,
176 	AMS_ALARM_BIT_SUPPLY8 = 9,
177 	AMS_ALARM_BIT_SUPPLY9 = 10,
178 	AMS_ALARM_BIT_SUPPLY10 = 11,
179 	AMS_ALARM_BIT_VCCAMS = 12,
180 	AMS_ALARM_BIT_TEMP_REMOTE = 13,
181 };
182 
183 enum ams_seq {
184 	AMS_SEQ_VCC_PSPLL = 0,
185 	AMS_SEQ_VCC_PSBATT = 1,
186 	AMS_SEQ_VCCINT = 2,
187 	AMS_SEQ_VCCBRAM = 3,
188 	AMS_SEQ_VCCAUX = 4,
189 	AMS_SEQ_PSDDRPLL = 5,
190 	AMS_SEQ_INTDDR = 6,
191 };
192 
193 enum ams_ps_pl_seq {
194 	AMS_SEQ_CALIB = 0,
195 	AMS_SEQ_RSVD_1 = 1,
196 	AMS_SEQ_RSVD_2 = 2,
197 	AMS_SEQ_TEST = 3,
198 	AMS_SEQ_RSVD_4 = 4,
199 	AMS_SEQ_SUPPLY4 = 5,
200 	AMS_SEQ_SUPPLY5 = 6,
201 	AMS_SEQ_SUPPLY6 = 7,
202 	AMS_SEQ_TEMP = 8,
203 	AMS_SEQ_SUPPLY2 = 9,
204 	AMS_SEQ_SUPPLY1 = 10,
205 	AMS_SEQ_VP_VN = 11,
206 	AMS_SEQ_VREFP = 12,
207 	AMS_SEQ_VREFN = 13,
208 	AMS_SEQ_SUPPLY3 = 14,
209 	AMS_SEQ_CURRENT_MON = 15,
210 	AMS_SEQ_SUPPLY7 = 16,
211 	AMS_SEQ_SUPPLY8 = 17,
212 	AMS_SEQ_SUPPLY9 = 18,
213 	AMS_SEQ_SUPPLY10 = 19,
214 	AMS_SEQ_VCCAMS = 20,
215 	AMS_SEQ_TEMP_REMOTE = 21,
216 	AMS_SEQ_MAX = 22
217 };
218 
219 #define AMS_PS_SEQ_MAX		AMS_SEQ_MAX
220 #define AMS_SEQ(x)		(AMS_SEQ_MAX + (x))
221 #define PS_SEQ(x)		(x)
222 #define PL_SEQ(x)		(AMS_PS_SEQ_MAX + (x))
223 #define AMS_CTRL_SEQ_BASE	(AMS_PS_SEQ_MAX * 3)
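
/*
 * Resulting scan index layout (AMS_SEQ_MAX == 22):
 *   PS channels,  PS_SEQ(x):            0 .. 21
 *   PL channels,  PL_SEQ(x):           22 .. 43
 *   PL VAUX,      PL_SEQ(AMS_SEQ(x)):  44 .. 59
 * Control channels start at AMS_CTRL_SEQ_BASE (66), hence the nested
 * AMS_SEQ() in AMS_CTRL_CHAN_VOLTAGE() below.
 */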
224 
225 #define AMS_CHAN_TEMP(_scan_index, _addr) { \
226 	.type = IIO_TEMP, \
227 	.indexed = 1, \
228 	.address = (_addr), \
229 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
230 		BIT(IIO_CHAN_INFO_SCALE) | \
231 		BIT(IIO_CHAN_INFO_OFFSET), \
232 	.event_spec = ams_temp_events, \
233 	.scan_index = _scan_index, \
234 	.num_event_specs = ARRAY_SIZE(ams_temp_events), \
235 }
236 
237 #define AMS_CHAN_VOLTAGE(_scan_index, _addr, _alarm) { \
238 	.type = IIO_VOLTAGE, \
239 	.indexed = 1, \
240 	.address = (_addr), \
241 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
242 		BIT(IIO_CHAN_INFO_SCALE), \
243 	.event_spec = (_alarm) ? ams_voltage_events : NULL, \
244 	.scan_index = _scan_index, \
245 	.num_event_specs = (_alarm) ? ARRAY_SIZE(ams_voltage_events) : 0, \
246 }
247 
248 #define AMS_PS_CHAN_TEMP(_scan_index, _addr) \
249 	AMS_CHAN_TEMP(PS_SEQ(_scan_index), _addr)
250 #define AMS_PS_CHAN_VOLTAGE(_scan_index, _addr) \
251 	AMS_CHAN_VOLTAGE(PS_SEQ(_scan_index), _addr, true)
252 
253 #define AMS_PL_CHAN_TEMP(_scan_index, _addr) \
254 	AMS_CHAN_TEMP(PL_SEQ(_scan_index), _addr)
255 #define AMS_PL_CHAN_VOLTAGE(_scan_index, _addr, _alarm) \
256 	AMS_CHAN_VOLTAGE(PL_SEQ(_scan_index), _addr, _alarm)
257 #define AMS_PL_AUX_CHAN_VOLTAGE(_auxno) \
258 	AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(_auxno)), AMS_REG_VAUX(_auxno), false)
259 #define AMS_CTRL_CHAN_VOLTAGE(_scan_index, _addr) \
260 	AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(AMS_SEQ(_scan_index))), _addr, false)
261 
262 /**
263  * struct ams - Driver state for the Xilinx AMS
264  * @base: mapped base address of the common AMS control registers
265  * @ps_base: mapped base address of the PS sysmon registers
266  * @pl_base: mapped base address of the PL sysmon registers
267  * @clk: AMS reference clock
268  * @dev: pointer to the device struct
269  * @lock: mutex serializing userspace access to channels and events
270  * @intr_lock: spinlock protecting the interrupt mask values
271  * @alarm_mask: bitmask of enabled alarms
272  * @current_masked_alarm: alarms temporarily masked while their condition is active
273  * @intr_mask: bitmask of disabled interrupts
274  * @ams_unmask_work: delayed work that re-enables an event once its condition disappears
275  *
276  */
277 struct ams {
278 	void __iomem *base;
279 	void __iomem *ps_base;
280 	void __iomem *pl_base;
281 	struct clk *clk;
282 	struct device *dev;
283 	struct mutex lock;
284 	spinlock_t intr_lock;
285 	unsigned int alarm_mask;
286 	unsigned int current_masked_alarm;
287 	u64 intr_mask;
288 	struct delayed_work ams_unmask_work;
289 };
290 
291 static inline void ams_ps_update_reg(struct ams *ams, unsigned int offset,
292 				     u32 mask, u32 data)
293 {
294 	u32 val, regval;
295 
296 	val = readl(ams->ps_base + offset);
297 	regval = (val & ~mask) | (data & mask);
298 	writel(regval, ams->ps_base + offset);
299 }
300 
301 static inline void ams_pl_update_reg(struct ams *ams, unsigned int offset,
302 				     u32 mask, u32 data)
303 {
304 	u32 val, regval;
305 
306 	val = readl(ams->pl_base + offset);
307 	regval = (val & ~mask) | (data & mask);
308 	writel(regval, ams->pl_base + offset);
309 }
310 
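/*
 * Interrupt mask handling: a set bit in ams->intr_mask or in
 * ams->current_masked_alarm means the corresponding interrupt should stay
 * disabled. ams_update_intrmask() writes the complement of the combined mask
 * to the interrupt enable registers (AMS_IER_0/1) and the mask itself to the
 * interrupt disable registers (AMS_IDR_0/1), so both views stay consistent.
 */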
311 static void ams_update_intrmask(struct ams *ams, u64 mask, u64 val)
312 {
313 	u32 regval;
314 
315 	ams->intr_mask = (ams->intr_mask & ~mask) | (val & mask);
316 
317 	regval = ~(ams->intr_mask | ams->current_masked_alarm);
318 	writel(regval, ams->base + AMS_IER_0);
319 
320 	regval = ~(FIELD_GET(AMS_ISR1_INTR_MASK, ams->intr_mask));
321 	writel(regval, ams->base + AMS_IER_1);
322 
323 	regval = ams->intr_mask | ams->current_masked_alarm;
324 	writel(regval, ams->base + AMS_IDR_0);
325 
326 	regval = FIELD_GET(AMS_ISR1_INTR_MASK, ams->intr_mask);
327 	writel(regval, ams->base + AMS_IDR_1);
328 }
329 
330 static void ams_disable_all_alarms(struct ams *ams)
331 {
332 	/* disable PS module alarm */
333 	if (ams->ps_base) {
334 		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
335 				  AMS_REGCFG1_ALARM_MASK);
336 		ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
337 				  AMS_REGCFG3_ALARM_MASK);
338 	}
339 
340 	/* disable PL module alarm */
341 	if (ams->pl_base) {
342 		ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
343 				  AMS_REGCFG1_ALARM_MASK);
344 		ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
345 				  AMS_REGCFG3_ALARM_MASK);
346 	}
347 }
348 
349 static void ams_update_ps_alarm(struct ams *ams, unsigned long alarm_mask)
350 {
351 	u32 cfg;
352 	u32 val;
353 
354 	val = FIELD_GET(AMS_ISR0_ALARM_2_TO_0_MASK, alarm_mask);
355 	cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_2_TO_0_MASK, val));
356 
357 	val = FIELD_GET(AMS_ISR0_ALARM_6_TO_3_MASK, alarm_mask);
358 	cfg &= ~(FIELD_PREP(AMS_CONF1_ALARM_6_TO_3_MASK, val));
359 
360 	ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, cfg);
361 
362 	val = FIELD_GET(AMS_ISR0_ALARM_12_TO_7_MASK, alarm_mask);
363 	cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_12_TO_7_MASK, val));
364 	ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
365 }
366 
367 static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask)
368 {
369 	unsigned long pl_alarm_mask;
370 	u32 cfg;
371 	u32 val;
372 
373 	pl_alarm_mask = FIELD_GET(AMS_PL_ALARM_MASK, alarm_mask);
374 
375 	val = FIELD_GET(AMS_ISR0_ALARM_2_TO_0_MASK, pl_alarm_mask);
376 	cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_2_TO_0_MASK, val));
377 
378 	val = FIELD_GET(AMS_ISR0_ALARM_6_TO_3_MASK, pl_alarm_mask);
379 	cfg &= ~(FIELD_PREP(AMS_CONF1_ALARM_6_TO_3_MASK, val));
380 
381 	ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, cfg);
382 
383 	val = FIELD_GET(AMS_ISR0_ALARM_12_TO_7_MASK, pl_alarm_mask);
384 	cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_12_TO_7_MASK, val));
385 	ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
386 }
387 
388 static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
389 {
390 	unsigned long flags;
391 
392 	if (ams->ps_base)
393 		ams_update_ps_alarm(ams, alarm_mask);
394 
395 	if (ams->pl_base)
396 		ams_update_pl_alarm(ams, alarm_mask);
397 
398 	spin_lock_irqsave(&ams->intr_lock, flags);
399 	ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
400 	spin_unlock_irqrestore(&ams->intr_lock, flags);
401 }
402 
403 static void ams_enable_channel_sequence(struct iio_dev *indio_dev)
404 {
405 	struct ams *ams = iio_priv(indio_dev);
406 	unsigned long long scan_mask;
407 	int i;
408 	u32 regval;
409 
410 	/*
411 	 * Enable the channel sequence. The first 22 bits of scan_mask
412 	 * represent the PS channels; the remaining bits represent the PL channels.
413 	 */
414 
415 	/* Run calibration of PS & PL as part of the sequence */
416 	scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX);
417 	for (i = 0; i < indio_dev->num_channels; i++)
418 		scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index);
419 
420 	if (ams->ps_base) {
421 		/* put sysmon in a soft reset to change the sequence */
422 		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
423 				  AMS_CONF1_SEQ_DEFAULT);
424 
425 		/* configure basic channels */
426 		regval = FIELD_GET(AMS_REG_SEQ0_MASK, scan_mask);
427 		writel(regval, ams->ps_base + AMS_REG_SEQ_CH0);
428 
429 		regval = FIELD_GET(AMS_REG_SEQ2_MASK, scan_mask);
430 		writel(regval, ams->ps_base + AMS_REG_SEQ_CH2);
431 
432 		/* set continuous sequence mode */
433 		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
434 				  AMS_CONF1_SEQ_CONTINUOUS);
435 	}
436 
437 	if (ams->pl_base) {
438 		/* put sysmon in a soft reset to change the sequence */
439 		ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
440 				  AMS_CONF1_SEQ_DEFAULT);
441 
442 		/* configure basic channels */
443 		scan_mask = FIELD_GET(AMS_PL_SEQ_MASK, scan_mask);
444 
445 		regval = FIELD_GET(AMS_REG_SEQ0_MASK, scan_mask);
446 		writel(regval, ams->pl_base + AMS_REG_SEQ_CH0);
447 
448 		regval = FIELD_GET(AMS_REG_SEQ1_MASK, scan_mask);
449 		writel(regval, ams->pl_base + AMS_REG_SEQ_CH1);
450 
451 		regval = FIELD_GET(AMS_REG_SEQ2_MASK, scan_mask);
452 		writel(regval, ams->pl_base + AMS_REG_SEQ_CH2);
453 
454 		/* set continuous sequence mode */
455 		ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
456 				  AMS_CONF1_SEQ_CONTINUOUS);
457 	}
458 }
459 
460 static int ams_init_device(struct ams *ams)
461 {
462 	u32 expect = AMS_PS_CSTS_PS_READY;
463 	u32 reg, value;
464 	int ret;
465 
466 	/* reset AMS */
467 	if (ams->ps_base) {
468 		writel(AMS_PS_RESET_VALUE, ams->ps_base + AMS_VP_VN);
469 
470 		ret = readl_poll_timeout(ams->base + AMS_PS_CSTS, reg, (reg & expect),
471 					 AMS_INIT_POLL_TIME_US, AMS_INIT_TIMEOUT_US);
472 		if (ret)
473 			return ret;
474 
475 		/* put sysmon in a default state */
476 		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
477 				  AMS_CONF1_SEQ_DEFAULT);
478 	}
479 
480 	if (ams->pl_base) {
481 		value = readl(ams->base + AMS_PL_CSTS);
482 		if (value == 0)
483 			return 0;
484 
485 		writel(AMS_PL_RESET_VALUE, ams->pl_base + AMS_VP_VN);
486 
487 		/* put sysmon in a default state */
488 		ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
489 				  AMS_CONF1_SEQ_DEFAULT);
490 	}
491 
492 	ams_disable_all_alarms(ams);
493 
494 	/* Disable interrupt */
495 	ams_update_intrmask(ams, AMS_ALARM_MASK, AMS_ALARM_MASK);
496 
497 	/* Clear any pending interrupt */
498 	writel(AMS_ISR0_ALARM_MASK, ams->base + AMS_ISR_0);
499 	writel(AMS_ISR1_ALARM_MASK, ams->base + AMS_ISR_1);
500 
501 	return 0;
502 }
503 
504 static int ams_enable_single_channel(struct ams *ams, unsigned int offset)
505 {
506 	u8 channel_num;
507 
508 	switch (offset) {
509 	case AMS_VCC_PSPLL0:
510 		channel_num = AMS_VCC_PSPLL0_CH;
511 		break;
512 	case AMS_VCC_PSPLL3:
513 		channel_num = AMS_VCC_PSPLL3_CH;
514 		break;
515 	case AMS_VCCINT:
516 		channel_num = AMS_VCCINT_CH;
517 		break;
518 	case AMS_VCCBRAM:
519 		channel_num = AMS_VCCBRAM_CH;
520 		break;
521 	case AMS_VCCAUX:
522 		channel_num = AMS_VCCAUX_CH;
523 		break;
524 	case AMS_PSDDRPLL:
525 		channel_num = AMS_PSDDRPLL_CH;
526 		break;
527 	case AMS_PSINTFPDDR:
528 		channel_num = AMS_PSINTFPDDR_CH;
529 		break;
530 	default:
531 		return -EINVAL;
532 	}
533 
534 	/* set single channel, sequencer off mode */
535 	ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
536 			  AMS_CONF1_SEQ_SINGLE_CHANNEL);
537 
538 	/* write the channel number */
539 	ams_ps_update_reg(ams, AMS_REG_CONFIG0, AMS_CONF0_CHANNEL_NUM_MASK,
540 			  channel_num);
541 
542 	return 0;
543 }
544 
545 static int ams_read_vcc_reg(struct ams *ams, unsigned int offset, u32 *data)
546 {
547 	u32 expect = AMS_ISR1_EOC_MASK;
548 	u32 reg;
549 	int ret;
550 
551 	ret = ams_enable_single_channel(ams, offset);
552 	if (ret)
553 		return ret;
554 
555 	ret = readl_poll_timeout(ams->base + AMS_ISR_1, reg, (reg & expect),
556 				 AMS_INIT_POLL_TIME_US, AMS_INIT_TIMEOUT_US);
557 	if (ret)
558 		return ret;
559 
560 	*data = readl(ams->base + offset);
561 
562 	return 0;
563 }
564 
565 static int ams_get_ps_scale(int address)
566 {
567 	int val;
568 
569 	switch (address) {
570 	case AMS_SUPPLY1:
571 	case AMS_SUPPLY2:
572 	case AMS_SUPPLY3:
573 	case AMS_SUPPLY4:
574 	case AMS_SUPPLY9:
575 	case AMS_SUPPLY10:
576 	case AMS_VCCAMS:
577 		val = AMS_SUPPLY_SCALE_3VOLT_mV;
578 		break;
579 	case AMS_SUPPLY5:
580 	case AMS_SUPPLY6:
581 	case AMS_SUPPLY7:
582 	case AMS_SUPPLY8:
583 		val = AMS_SUPPLY_SCALE_6VOLT_mV;
584 		break;
585 	default:
586 		val = AMS_SUPPLY_SCALE_1VOLT_mV;
587 		break;
588 	}
589 
590 	return val;
591 }
592 
593 static int ams_get_pl_scale(struct ams *ams, int address)
594 {
595 	int val, regval;
596 
597 	switch (address) {
598 	case AMS_SUPPLY1:
599 	case AMS_SUPPLY2:
600 	case AMS_SUPPLY3:
601 	case AMS_SUPPLY4:
602 	case AMS_SUPPLY5:
603 	case AMS_SUPPLY6:
604 	case AMS_VCCAMS:
605 	case AMS_VREFP:
606 	case AMS_VREFN:
607 		val = AMS_SUPPLY_SCALE_3VOLT_mV;
608 		break;
609 	case AMS_SUPPLY7:
610 		regval = readl(ams->pl_base + AMS_REG_CONFIG4);
611 		if (FIELD_GET(AMS_VUSER0_MASK, regval))
612 			val = AMS_SUPPLY_SCALE_6VOLT_mV;
613 		else
614 			val = AMS_SUPPLY_SCALE_3VOLT_mV;
615 		break;
616 	case AMS_SUPPLY8:
617 		regval = readl(ams->pl_base + AMS_REG_CONFIG4);
618 		if (FIELD_GET(AMS_VUSER1_MASK, regval))
619 			val = AMS_SUPPLY_SCALE_6VOLT_mV;
620 		else
621 			val = AMS_SUPPLY_SCALE_3VOLT_mV;
622 		break;
623 	case AMS_SUPPLY9:
624 		regval = readl(ams->pl_base + AMS_REG_CONFIG4);
625 		if (FIELD_GET(AMS_VUSER2_MASK, regval))
626 			val = AMS_SUPPLY_SCALE_6VOLT_mV;
627 		else
628 			val = AMS_SUPPLY_SCALE_3VOLT_mV;
629 		break;
630 	case AMS_SUPPLY10:
631 		regval = readl(ams->pl_base + AMS_REG_CONFIG4);
632 		if (FIELD_GET(AMS_VUSER3_MASK, regval))
633 			val = AMS_SUPPLY_SCALE_6VOLT_mV;
634 		else
635 			val = AMS_SUPPLY_SCALE_3VOLT_mV;
636 		break;
637 	case AMS_VP_VN:
638 	case AMS_REG_VAUX(0) ... AMS_REG_VAUX(15):
639 		val = AMS_SUPPLY_SCALE_1VOLT_mV;
640 		break;
641 	default:
642 		val = AMS_SUPPLY_SCALE_1VOLT_mV;
643 		break;
644 	}
645 
646 	return val;
647 }
648 
649 static int ams_get_ctrl_scale(int address)
650 {
651 	int val;
652 
653 	switch (address) {
654 	case AMS_VCC_PSPLL0:
655 	case AMS_VCC_PSPLL3:
656 	case AMS_VCCINT:
657 	case AMS_VCCBRAM:
658 	case AMS_VCCAUX:
659 	case AMS_PSDDRPLL:
660 	case AMS_PSINTFPDDR:
661 		val = AMS_SUPPLY_SCALE_3VOLT_mV;
662 		break;
663 	default:
664 		val = AMS_SUPPLY_SCALE_1VOLT_mV;
665 		break;
666 	}
667 
668 	return val;
669 }
670 
671 static int ams_read_raw(struct iio_dev *indio_dev,
672 			struct iio_chan_spec const *chan,
673 			int *val, int *val2, long mask)
674 {
675 	struct ams *ams = iio_priv(indio_dev);
676 	int ret;
677 
678 	switch (mask) {
679 	case IIO_CHAN_INFO_RAW:
680 		mutex_lock(&ams->lock);
681 		if (chan->scan_index >= AMS_CTRL_SEQ_BASE) {
682 			ret = ams_read_vcc_reg(ams, chan->address, val);
683 			if (ret)
684 				goto unlock_mutex;
685 			ams_enable_channel_sequence(indio_dev);
686 		} else if (chan->scan_index >= AMS_PS_SEQ_MAX) {
687 			*val = readl(ams->pl_base + chan->address);
688 		} else {
689 			*val = readl(ams->ps_base + chan->address);
690 		}
691 		ret = IIO_VAL_INT;
692 unlock_mutex:
693 		mutex_unlock(&ams->lock);
694 		return ret;
695 	case IIO_CHAN_INFO_SCALE:
696 		switch (chan->type) {
697 		case IIO_VOLTAGE:
698 			if (chan->scan_index < AMS_PS_SEQ_MAX)
699 				*val = ams_get_ps_scale(chan->address);
700 			else if (chan->scan_index >= AMS_PS_SEQ_MAX &&
701 				 chan->scan_index < AMS_CTRL_SEQ_BASE)
702 				*val = ams_get_pl_scale(ams, chan->address);
703 			else
704 				*val = ams_get_ctrl_scale(chan->address);
705 
706 			*val2 = AMS_SUPPLY_SCALE_DIV_BIT;
707 			return IIO_VAL_FRACTIONAL_LOG2;
708 		case IIO_TEMP:
709 			*val = AMS_TEMP_SCALE;
710 			*val2 = AMS_TEMP_SCALE_DIV_BIT;
711 			return IIO_VAL_FRACTIONAL_LOG2;
712 		default:
713 			return -EINVAL;
714 		}
715 	case IIO_CHAN_INFO_OFFSET:
716 		/* Only the temperature channel has an offset */
717 		*val = AMS_TEMP_OFFSET;
718 		return IIO_VAL_INT;
719 	default:
720 		return -EINVAL;
721 	}
722 }
723 
724 static int ams_get_alarm_offset(int scan_index, enum iio_event_direction dir)
725 {
726 	int offset;
727 
728 	if (scan_index >= AMS_PS_SEQ_MAX)
729 		scan_index -= AMS_PS_SEQ_MAX;
730 
731 	if (dir == IIO_EV_DIR_FALLING) {
732 		if (scan_index < AMS_SEQ_SUPPLY7)
733 			offset = AMS_ALARM_THRESHOLD_OFF_10;
734 		else
735 			offset = AMS_ALARM_THRESHOLD_OFF_20;
736 	} else {
737 		offset = 0;
738 	}
739 
740 	switch (scan_index) {
741 	case AMS_SEQ_TEMP:
742 		return AMS_ALARM_TEMP + offset;
743 	case AMS_SEQ_SUPPLY1:
744 		return AMS_ALARM_SUPPLY1 + offset;
745 	case AMS_SEQ_SUPPLY2:
746 		return AMS_ALARM_SUPPLY2 + offset;
747 	case AMS_SEQ_SUPPLY3:
748 		return AMS_ALARM_SUPPLY3 + offset;
749 	case AMS_SEQ_SUPPLY4:
750 		return AMS_ALARM_SUPPLY4 + offset;
751 	case AMS_SEQ_SUPPLY5:
752 		return AMS_ALARM_SUPPLY5 + offset;
753 	case AMS_SEQ_SUPPLY6:
754 		return AMS_ALARM_SUPPLY6 + offset;
755 	case AMS_SEQ_SUPPLY7:
756 		return AMS_ALARM_SUPPLY7 + offset;
757 	case AMS_SEQ_SUPPLY8:
758 		return AMS_ALARM_SUPPLY8 + offset;
759 	case AMS_SEQ_SUPPLY9:
760 		return AMS_ALARM_SUPPLY9 + offset;
761 	case AMS_SEQ_SUPPLY10:
762 		return AMS_ALARM_SUPPLY10 + offset;
763 	case AMS_SEQ_VCCAMS:
764 		return AMS_ALARM_VCCAMS + offset;
765 	case AMS_SEQ_TEMP_REMOTE:
766 		return AMS_ALARM_TEMP_REMOTE + offset;
767 	default:
768 		return 0;
769 	}
770 }
771 
772 static const struct iio_chan_spec *ams_event_to_channel(struct iio_dev *dev,
773 							u32 event)
774 {
775 	int scan_index = 0, i;
776 
777 	if (event >= AMS_PL_ALARM_START) {
778 		event -= AMS_PL_ALARM_START;
779 		scan_index = AMS_PS_SEQ_MAX;
780 	}
781 
782 	switch (event) {
783 	case AMS_ALARM_BIT_TEMP:
784 		scan_index += AMS_SEQ_TEMP;
785 		break;
786 	case AMS_ALARM_BIT_SUPPLY1:
787 		scan_index += AMS_SEQ_SUPPLY1;
788 		break;
789 	case AMS_ALARM_BIT_SUPPLY2:
790 		scan_index += AMS_SEQ_SUPPLY2;
791 		break;
792 	case AMS_ALARM_BIT_SUPPLY3:
793 		scan_index += AMS_SEQ_SUPPLY3;
794 		break;
795 	case AMS_ALARM_BIT_SUPPLY4:
796 		scan_index += AMS_SEQ_SUPPLY4;
797 		break;
798 	case AMS_ALARM_BIT_SUPPLY5:
799 		scan_index += AMS_SEQ_SUPPLY5;
800 		break;
801 	case AMS_ALARM_BIT_SUPPLY6:
802 		scan_index += AMS_SEQ_SUPPLY6;
803 		break;
804 	case AMS_ALARM_BIT_SUPPLY7:
805 		scan_index += AMS_SEQ_SUPPLY7;
806 		break;
807 	case AMS_ALARM_BIT_SUPPLY8:
808 		scan_index += AMS_SEQ_SUPPLY8;
809 		break;
810 	case AMS_ALARM_BIT_SUPPLY9:
811 		scan_index += AMS_SEQ_SUPPLY9;
812 		break;
813 	case AMS_ALARM_BIT_SUPPLY10:
814 		scan_index += AMS_SEQ_SUPPLY10;
815 		break;
816 	case AMS_ALARM_BIT_VCCAMS:
817 		scan_index += AMS_SEQ_VCCAMS;
818 		break;
819 	case AMS_ALARM_BIT_TEMP_REMOTE:
820 		scan_index += AMS_SEQ_TEMP_REMOTE;
821 		break;
822 	default:
823 		break;
824 	}
825 
826 	for (i = 0; i < dev->num_channels; i++)
827 		if (dev->channels[i].scan_index == scan_index)
828 			break;
829 
830 	return &dev->channels[i];
831 }
832 
833 static int ams_get_alarm_mask(int scan_index)
834 {
835 	int bit = 0;
836 
837 	if (scan_index >= AMS_PS_SEQ_MAX) {
838 		bit = AMS_PL_ALARM_START;
839 		scan_index -= AMS_PS_SEQ_MAX;
840 	}
841 
842 	switch (scan_index) {
843 	case AMS_SEQ_TEMP:
844 		return BIT(AMS_ALARM_BIT_TEMP + bit);
845 	case AMS_SEQ_SUPPLY1:
846 		return BIT(AMS_ALARM_BIT_SUPPLY1 + bit);
847 	case AMS_SEQ_SUPPLY2:
848 		return BIT(AMS_ALARM_BIT_SUPPLY2 + bit);
849 	case AMS_SEQ_SUPPLY3:
850 		return BIT(AMS_ALARM_BIT_SUPPLY3 + bit);
851 	case AMS_SEQ_SUPPLY4:
852 		return BIT(AMS_ALARM_BIT_SUPPLY4 + bit);
853 	case AMS_SEQ_SUPPLY5:
854 		return BIT(AMS_ALARM_BIT_SUPPLY5 + bit);
855 	case AMS_SEQ_SUPPLY6:
856 		return BIT(AMS_ALARM_BIT_SUPPLY6 + bit);
857 	case AMS_SEQ_SUPPLY7:
858 		return BIT(AMS_ALARM_BIT_SUPPLY7 + bit);
859 	case AMS_SEQ_SUPPLY8:
860 		return BIT(AMS_ALARM_BIT_SUPPLY8 + bit);
861 	case AMS_SEQ_SUPPLY9:
862 		return BIT(AMS_ALARM_BIT_SUPPLY9 + bit);
863 	case AMS_SEQ_SUPPLY10:
864 		return BIT(AMS_ALARM_BIT_SUPPLY10 + bit);
865 	case AMS_SEQ_VCCAMS:
866 		return BIT(AMS_ALARM_BIT_VCCAMS + bit);
867 	case AMS_SEQ_TEMP_REMOTE:
868 		return BIT(AMS_ALARM_BIT_TEMP_REMOTE + bit);
869 	default:
870 		return 0;
871 	}
872 }
873 
874 static int ams_read_event_config(struct iio_dev *indio_dev,
875 				 const struct iio_chan_spec *chan,
876 				 enum iio_event_type type,
877 				 enum iio_event_direction dir)
878 {
879 	struct ams *ams = iio_priv(indio_dev);
880 
881 	return !!(ams->alarm_mask & ams_get_alarm_mask(chan->scan_index));
882 }
883 
884 static int ams_write_event_config(struct iio_dev *indio_dev,
885 				  const struct iio_chan_spec *chan,
886 				  enum iio_event_type type,
887 				  enum iio_event_direction dir,
888 				  int state)
889 {
890 	struct ams *ams = iio_priv(indio_dev);
891 	unsigned int alarm;
892 
893 	alarm = ams_get_alarm_mask(chan->scan_index);
894 
895 	mutex_lock(&ams->lock);
896 
897 	if (state)
898 		ams->alarm_mask |= alarm;
899 	else
900 		ams->alarm_mask &= ~alarm;
901 
902 	ams_update_alarm(ams, ams->alarm_mask);
903 
904 	mutex_unlock(&ams->lock);
905 
906 	return 0;
907 }
908 
909 static int ams_read_event_value(struct iio_dev *indio_dev,
910 				const struct iio_chan_spec *chan,
911 				enum iio_event_type type,
912 				enum iio_event_direction dir,
913 				enum iio_event_info info, int *val, int *val2)
914 {
915 	struct ams *ams = iio_priv(indio_dev);
916 	unsigned int offset = ams_get_alarm_offset(chan->scan_index, dir);
917 
918 	mutex_lock(&ams->lock);
919 
920 	if (chan->scan_index >= AMS_PS_SEQ_MAX)
921 		*val = readl(ams->pl_base + offset);
922 	else
923 		*val = readl(ams->ps_base + offset);
924 
925 	mutex_unlock(&ams->lock);
926 
927 	return IIO_VAL_INT;
928 }
929 
930 static int ams_write_event_value(struct iio_dev *indio_dev,
931 				 const struct iio_chan_spec *chan,
932 				 enum iio_event_type type,
933 				 enum iio_event_direction dir,
934 				 enum iio_event_info info, int val, int val2)
935 {
936 	struct ams *ams = iio_priv(indio_dev);
937 	unsigned int offset;
938 
939 	mutex_lock(&ams->lock);
940 
941 	/* Set temperature channel threshold to direct threshold */
942 	if (chan->type == IIO_TEMP) {
943 		offset = ams_get_alarm_offset(chan->scan_index, IIO_EV_DIR_FALLING);
944 
945 		if (chan->scan_index >= AMS_PS_SEQ_MAX)
946 			ams_pl_update_reg(ams, offset,
947 					  AMS_ALARM_THR_DIRECT_MASK,
948 					  AMS_ALARM_THR_DIRECT_MASK);
949 		else
950 			ams_ps_update_reg(ams, offset,
951 					  AMS_ALARM_THR_DIRECT_MASK,
952 					  AMS_ALARM_THR_DIRECT_MASK);
953 	}
954 
955 	offset = ams_get_alarm_offset(chan->scan_index, dir);
956 	if (chan->scan_index >= AMS_PS_SEQ_MAX)
957 		writel(val, ams->pl_base + offset);
958 	else
959 		writel(val, ams->ps_base + offset);
960 
961 	mutex_unlock(&ams->lock);
962 
963 	return 0;
964 }
965 
966 static void ams_handle_event(struct iio_dev *indio_dev, u32 event)
967 {
968 	const struct iio_chan_spec *chan;
969 
970 	chan = ams_event_to_channel(indio_dev, event);
971 
972 	if (chan->type == IIO_TEMP) {
973 		/*
974 		 * The temperature channel only supports over-temperature
975 		 * events.
976 		 */
977 		iio_push_event(indio_dev,
978 			       IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
979 						    IIO_EV_TYPE_THRESH,
980 						    IIO_EV_DIR_RISING),
981 			       iio_get_time_ns(indio_dev));
982 	} else {
983 		/*
984 		 * For other channels we don't know whether it is an upper or
985 		 * lower threshold event. Userspace will have to check the
986 		 * channel value if it wants to know.
987 		 */
988 		iio_push_event(indio_dev,
989 			       IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
990 						    IIO_EV_TYPE_THRESH,
991 						    IIO_EV_DIR_EITHER),
992 			       iio_get_time_ns(indio_dev));
993 	}
994 }
995 
996 static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
997 {
998 	unsigned int bit;
999 
1000 	for_each_set_bit(bit, &events, AMS_NO_OF_ALARMS)
1001 		ams_handle_event(indio_dev, bit);
1002 }
1003 
1004 /**
1005  * ams_unmask_worker - ams alarm interrupt unmask worker
1006  * @work: work to be done
1007  *
1008  * The ZynqMP threshold interrupts are level sensitive. Since we can't make the
1009  * threshold condition go away from within the interrupt handler, we would
1010  * re-enter the interrupt handler again and again for as long as the condition
1011  * persists. To work around this we mask all active threshold interrupts in the
1012  * interrupt handler and schedule delayed work. In that work we poll the
1013  * interrupt status and only unmask an interrupt once it has become inactive.
1014  */
1015 static void ams_unmask_worker(struct work_struct *work)
1016 {
1017 	struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
1018 	unsigned int status, unmask;
1019 
1020 	spin_lock_irq(&ams->intr_lock);
1021 
1022 	status = readl(ams->base + AMS_ISR_0);
1023 
1024 	/* Collect the masked alarms whose condition has cleared */
1025 	unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
1026 
1027 	/* Clear status of disabled alarm */
1028 	unmask |= ams->intr_mask;
1029 
1030 	ams->current_masked_alarm &= status;
1031 
1032 	/* Also clear those which are masked out anyway */
1033 	ams->current_masked_alarm &= ~ams->intr_mask;
1034 
1035 	/* Clear the interrupts before we unmask them */
1036 	writel(unmask, ams->base + AMS_ISR_0);
1037 
1038 	ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
1039 
1040 	spin_unlock_irq(&ams->intr_lock);
1041 
1042 	/* If any alarm is still active and masked, re-schedule the unmask work */
1043 	if (ams->current_masked_alarm)
1044 		schedule_delayed_work(&ams->ams_unmask_work,
1045 				      msecs_to_jiffies(AMS_UNMASK_TIMEOUT_MS));
1046 }
1047 
1048 static irqreturn_t ams_irq(int irq, void *data)
1049 {
1050 	struct iio_dev *indio_dev = data;
1051 	struct ams *ams = iio_priv(indio_dev);
1052 	u32 isr0;
1053 
1054 	spin_lock(&ams->intr_lock);
1055 
1056 	isr0 = readl(ams->base + AMS_ISR_0);
1057 
1058 	/* Only process alarms that are not masked */
1059 	isr0 &= ~((ams->intr_mask & AMS_ISR0_ALARM_MASK) | ams->current_masked_alarm);
1060 	if (!isr0) {
1061 		spin_unlock(&ams->intr_lock);
1062 		return IRQ_NONE;
1063 	}
1064 
1065 	/* Clear interrupt */
1066 	writel(isr0, ams->base + AMS_ISR_0);
1067 
1068 	/* Mask the alarm interrupts until cleared */
1069 	ams->current_masked_alarm |= isr0;
1070 	ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
1071 
1072 	ams_handle_events(indio_dev, isr0);
1073 
1074 	schedule_delayed_work(&ams->ams_unmask_work,
1075 			      msecs_to_jiffies(AMS_UNMASK_TIMEOUT_MS));
1076 
1077 	spin_unlock(&ams->intr_lock);
1078 
1079 	return IRQ_HANDLED;
1080 }
1081 
1082 static const struct iio_event_spec ams_temp_events[] = {
1083 	{
1084 		.type = IIO_EV_TYPE_THRESH,
1085 		.dir = IIO_EV_DIR_RISING,
1086 		.mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE),
1087 	},
1088 };
1089 
1090 static const struct iio_event_spec ams_voltage_events[] = {
1091 	{
1092 		.type = IIO_EV_TYPE_THRESH,
1093 		.dir = IIO_EV_DIR_RISING,
1094 		.mask_separate = BIT(IIO_EV_INFO_VALUE),
1095 	},
1096 	{
1097 		.type = IIO_EV_TYPE_THRESH,
1098 		.dir = IIO_EV_DIR_FALLING,
1099 		.mask_separate = BIT(IIO_EV_INFO_VALUE),
1100 	},
1101 	{
1102 		.type = IIO_EV_TYPE_THRESH,
1103 		.dir = IIO_EV_DIR_EITHER,
1104 		.mask_separate = BIT(IIO_EV_INFO_ENABLE),
1105 	},
1106 };
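
/*
 * Note: voltage channels expose independent rising and falling threshold
 * values but only a single enable covering both directions, which is why the
 * interrupt path reports IIO_EV_DIR_EITHER events (see ams_handle_event()).
 */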
1107 
1108 static const struct iio_chan_spec ams_ps_channels[] = {
1109 	AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP),
1110 	AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP_REMOTE, AMS_TEMP_REMOTE),
1111 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1),
1112 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2),
1113 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3),
1114 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4),
1115 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5),
1116 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6),
1117 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7),
1118 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8),
1119 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9),
1120 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10),
1121 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS),
1122 };
1123 
1124 static const struct iio_chan_spec ams_pl_channels[] = {
1125 	AMS_PL_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP),
1126 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, true),
1127 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, true),
1128 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFP, AMS_VREFP, false),
1129 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFN, AMS_VREFN, false),
1130 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, true),
1131 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, true),
1132 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, true),
1133 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, true),
1134 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, true),
1135 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VP_VN, AMS_VP_VN, false),
1136 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, true),
1137 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, true),
1138 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, true),
1139 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, true),
1140 	AMS_PL_AUX_CHAN_VOLTAGE(0),
1141 	AMS_PL_AUX_CHAN_VOLTAGE(1),
1142 	AMS_PL_AUX_CHAN_VOLTAGE(2),
1143 	AMS_PL_AUX_CHAN_VOLTAGE(3),
1144 	AMS_PL_AUX_CHAN_VOLTAGE(4),
1145 	AMS_PL_AUX_CHAN_VOLTAGE(5),
1146 	AMS_PL_AUX_CHAN_VOLTAGE(6),
1147 	AMS_PL_AUX_CHAN_VOLTAGE(7),
1148 	AMS_PL_AUX_CHAN_VOLTAGE(8),
1149 	AMS_PL_AUX_CHAN_VOLTAGE(9),
1150 	AMS_PL_AUX_CHAN_VOLTAGE(10),
1151 	AMS_PL_AUX_CHAN_VOLTAGE(11),
1152 	AMS_PL_AUX_CHAN_VOLTAGE(12),
1153 	AMS_PL_AUX_CHAN_VOLTAGE(13),
1154 	AMS_PL_AUX_CHAN_VOLTAGE(14),
1155 	AMS_PL_AUX_CHAN_VOLTAGE(15),
1156 };
1157 
1158 static const struct iio_chan_spec ams_ctrl_channels[] = {
1159 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSPLL, AMS_VCC_PSPLL0),
1160 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSBATT, AMS_VCC_PSPLL3),
1161 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCINT, AMS_VCCINT),
1162 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCBRAM, AMS_VCCBRAM),
1163 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCAUX, AMS_VCCAUX),
1164 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_PSDDRPLL, AMS_PSDDRPLL),
1165 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_INTDDR, AMS_PSINTFPDDR),
1166 };
1167 
1168 static int ams_get_ext_chan(struct fwnode_handle *chan_node,
1169 			    struct iio_chan_spec *channels, int num_channels)
1170 {
1171 	struct iio_chan_spec *chan;
1172 	struct fwnode_handle *child;
1173 	unsigned int reg, ext_chan;
1174 	int ret;
1175 
1176 	fwnode_for_each_child_node(chan_node, child) {
1177 		ret = fwnode_property_read_u32(child, "reg", &reg);
1178 		if (ret || reg > AMS_PL_MAX_EXT_CHANNEL + 30)
1179 			continue;
1180 
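		/*
		 * The firmware node is assumed to number the optional PL
		 * channels from 30 upwards, so a "reg" of 30 ..
		 * (AMS_PL_MAX_EXT_CHANNEL + 30) selects ams_pl_channels[]
		 * entries from index AMS_PL_MAX_FIXED_CHANNEL onwards
		 * (VP/VN, SUPPLY7..SUPPLY10, then VAUX0..VAUX15); e.g.
		 * reg = 35 picks the VAUX0 channel.
		 */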
1181 		chan = &channels[num_channels];
1182 		ext_chan = reg + AMS_PL_MAX_FIXED_CHANNEL - 30;
1183 		memcpy(chan, &ams_pl_channels[ext_chan], sizeof(*channels));
1184 
1185 		if (fwnode_property_read_bool(child, "xlnx,bipolar"))
1186 			chan->scan_type.sign = 's';
1187 
1188 		num_channels++;
1189 	}
1190 
1191 	return num_channels;
1192 }
1193 
1194 static void ams_iounmap_ps(void *data)
1195 {
1196 	struct ams *ams = data;
1197 
1198 	iounmap(ams->ps_base);
1199 }
1200 
1201 static void ams_iounmap_pl(void *data)
1202 {
1203 	struct ams *ams = data;
1204 
1205 	iounmap(ams->pl_base);
1206 }
1207 
1208 static int ams_init_module(struct iio_dev *indio_dev,
1209 			   struct fwnode_handle *fwnode,
1210 			   struct iio_chan_spec *channels)
1211 {
1212 	struct device *dev = indio_dev->dev.parent;
1213 	struct ams *ams = iio_priv(indio_dev);
1214 	int num_channels = 0;
1215 	int ret;
1216 
1217 	if (fwnode_property_match_string(fwnode, "compatible",
1218 					 "xlnx,zynqmp-ams-ps") == 0) {
1219 		ams->ps_base = fwnode_iomap(fwnode, 0);
1220 		if (!ams->ps_base)
1221 			return -ENXIO;
1222 		ret = devm_add_action_or_reset(dev, ams_iounmap_ps, ams);
1223 		if (ret < 0)
1224 			return ret;
1225 
1226 		/* add PS channels to iio device channels */
1227 		memcpy(channels, ams_ps_channels, sizeof(ams_ps_channels));
1228 	} else if (fwnode_property_match_string(fwnode, "compatible",
1229 						"xlnx,zynqmp-ams-pl") == 0) {
1230 		ams->pl_base = fwnode_iomap(fwnode, 0);
1231 		if (!ams->pl_base)
1232 			return -ENXIO;
1233 
1234 		ret = devm_add_action_or_reset(dev, ams_iounmap_pl, ams);
1235 		if (ret < 0)
1236 			return ret;
1237 
1238 		/* Copy only the first 10 fixed channels */
1239 		memcpy(channels, ams_pl_channels, AMS_PL_MAX_FIXED_CHANNEL * sizeof(*channels));
1240 		num_channels += AMS_PL_MAX_FIXED_CHANNEL;
1241 		num_channels = ams_get_ext_chan(fwnode, channels,
1242 						num_channels);
1243 	} else if (fwnode_property_match_string(fwnode, "compatible",
1244 						"xlnx,zynqmp-ams") == 0) {
1245 		/* add AMS channels to iio device channels */
1246 		memcpy(channels, ams_ctrl_channels, sizeof(ams_ctrl_channels));
1247 		num_channels += ARRAY_SIZE(ams_ctrl_channels);
1248 	} else {
1249 		return -EINVAL;
1250 	}
1251 
1252 	return num_channels;
1253 }
1254 
1255 static int ams_parse_firmware(struct iio_dev *indio_dev)
1256 {
1257 	struct ams *ams = iio_priv(indio_dev);
1258 	struct iio_chan_spec *ams_channels, *dev_channels;
1259 	struct device *dev = indio_dev->dev.parent;
1260 	struct fwnode_handle *child = NULL;
1261 	struct fwnode_handle *fwnode = dev_fwnode(dev);
1262 	size_t ams_size, dev_size;
1263 	int ret, ch_cnt = 0, i, rising_off, falling_off;
1264 	unsigned int num_channels = 0;
1265 
1266 	ams_size = ARRAY_SIZE(ams_ps_channels) + ARRAY_SIZE(ams_pl_channels) +
1267 		ARRAY_SIZE(ams_ctrl_channels);
1268 
1269 	/* Initialize buffer for channel specification */
1270 	ams_channels = devm_kcalloc(dev, ams_size, sizeof(*ams_channels), GFP_KERNEL);
1271 	if (!ams_channels)
1272 		return -ENOMEM;
1273 
1274 	if (fwnode_device_is_available(fwnode)) {
1275 		ret = ams_init_module(indio_dev, fwnode, ams_channels);
1276 		if (ret < 0)
1277 			return ret;
1278 
1279 		num_channels += ret;
1280 	}
1281 
1282 	fwnode_for_each_child_node(fwnode, child) {
1283 		if (fwnode_device_is_available(child)) {
1284 			ret = ams_init_module(indio_dev, child, ams_channels + num_channels);
1285 			if (ret < 0) {
1286 				fwnode_handle_put(child);
1287 				return ret;
1288 			}
1289 
1290 			num_channels += ret;
1291 		}
1292 	}
1293 
1294 	for (i = 0; i < num_channels; i++) {
1295 		ams_channels[i].channel = ch_cnt++;
1296 
1297 		if (ams_channels[i].scan_index < AMS_CTRL_SEQ_BASE) {
1298 			/* default thresholds: falling to min, rising to max */
1299 			falling_off =
1300 				ams_get_alarm_offset(ams_channels[i].scan_index,
1301 						     IIO_EV_DIR_FALLING);
1302 			rising_off =
1303 				ams_get_alarm_offset(ams_channels[i].scan_index,
1304 						     IIO_EV_DIR_RISING);
1305 			if (ams_channels[i].scan_index >= AMS_PS_SEQ_MAX) {
1306 				writel(AMS_ALARM_THR_MIN,
1307 				       ams->pl_base + falling_off);
1308 				writel(AMS_ALARM_THR_MAX,
1309 				       ams->pl_base + rising_off);
1310 			} else {
1311 				writel(AMS_ALARM_THR_MIN,
1312 				       ams->ps_base + falling_off);
1313 				writel(AMS_ALARM_THR_MAX,
1314 				       ams->ps_base + rising_off);
1315 			}
1316 		}
1317 	}
1318 
1319 	dev_size = array_size(sizeof(*dev_channels), num_channels);
1320 	if (dev_size == SIZE_MAX)
1321 		return -ENOMEM;
1322 
1323 	dev_channels = devm_krealloc(dev, ams_channels, dev_size, GFP_KERNEL);
1324 	if (!dev_channels)
1325 		return -ENOMEM;
1326 
1327 	indio_dev->channels = dev_channels;
1328 	indio_dev->num_channels = num_channels;
1329 
1330 	return 0;
1331 }
1332 
1333 static const struct iio_info iio_ams_info = {
1334 	.read_raw = &ams_read_raw,
1335 	.read_event_config = &ams_read_event_config,
1336 	.write_event_config = &ams_write_event_config,
1337 	.read_event_value = &ams_read_event_value,
1338 	.write_event_value = &ams_write_event_value,
1339 };
1340 
1341 static const struct of_device_id ams_of_match_table[] = {
1342 	{ .compatible = "xlnx,zynqmp-ams" },
1343 	{ }
1344 };
1345 MODULE_DEVICE_TABLE(of, ams_of_match_table);
1346 
1347 static void ams_clk_disable_unprepare(void *data)
1348 {
1349 	clk_disable_unprepare(data);
1350 }
1351 
1352 static int ams_probe(struct platform_device *pdev)
1353 {
1354 	struct iio_dev *indio_dev;
1355 	struct ams *ams;
1356 	int ret;
1357 	int irq;
1358 
1359 	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ams));
1360 	if (!indio_dev)
1361 		return -ENOMEM;
1362 
1363 	ams = iio_priv(indio_dev);
1364 	mutex_init(&ams->lock);
1365 	spin_lock_init(&ams->intr_lock);
1366 
1367 	indio_dev->name = "xilinx-ams";
1368 
1369 	indio_dev->info = &iio_ams_info;
1370 	indio_dev->modes = INDIO_DIRECT_MODE;
1371 
1372 	ams->base = devm_platform_ioremap_resource(pdev, 0);
1373 	if (IS_ERR(ams->base))
1374 		return PTR_ERR(ams->base);
1375 
1376 	ams->clk = devm_clk_get(&pdev->dev, NULL);
1377 	if (IS_ERR(ams->clk))
1378 		return PTR_ERR(ams->clk);
1379 
1380 	ret = clk_prepare_enable(ams->clk);
1381 	if (ret < 0)
1382 		return ret;
1383 
1384 	ret = devm_add_action_or_reset(&pdev->dev, ams_clk_disable_unprepare, ams->clk);
1385 	if (ret < 0)
1386 		return ret;
1387 
1388 	ret = devm_delayed_work_autocancel(&pdev->dev, &ams->ams_unmask_work,
1389 					   ams_unmask_worker);
1390 	if (ret < 0)
1391 		return ret;
1392 
1393 	ret = ams_parse_firmware(indio_dev);
1394 	if (ret)
1395 		return dev_err_probe(&pdev->dev, ret, "failure in parsing DT\n");
1396 
1397 	ret = ams_init_device(ams);
1398 	if (ret)
1399 		return dev_err_probe(&pdev->dev, ret, "failed to initialize AMS\n");
1400 
1401 	ams_enable_channel_sequence(indio_dev);
1402 
1403 	irq = platform_get_irq(pdev, 0);
1404 	if (irq < 0)
1405 		return irq;
1406 
1407 	ret = devm_request_irq(&pdev->dev, irq, &ams_irq, 0, "ams-irq",
1408 			       indio_dev);
1409 	if (ret < 0)
1410 		return dev_err_probe(&pdev->dev, ret, "failed to register interrupt\n");
1411 
1412 	platform_set_drvdata(pdev, indio_dev);
1413 
1414 	return devm_iio_device_register(&pdev->dev, indio_dev);
1415 }
1416 
1417 static int __maybe_unused ams_suspend(struct device *dev)
1418 {
1419 	struct ams *ams = iio_priv(dev_get_drvdata(dev));
1420 
1421 	clk_disable_unprepare(ams->clk);
1422 
1423 	return 0;
1424 }
1425 
1426 static int __maybe_unused ams_resume(struct device *dev)
1427 {
1428 	struct ams *ams = iio_priv(dev_get_drvdata(dev));
1429 
1430 	return clk_prepare_enable(ams->clk);
1431 }
1432 
1433 static SIMPLE_DEV_PM_OPS(ams_pm_ops, ams_suspend, ams_resume);
1434 
1435 static struct platform_driver ams_driver = {
1436 	.probe = ams_probe,
1437 	.driver = {
1438 		.name = "xilinx-ams",
1439 		.pm = &ams_pm_ops,
1440 		.of_match_table = ams_of_match_table,
1441 	},
1442 };
1443 module_platform_driver(ams_driver);
1444 
1445 MODULE_LICENSE("GPL v2");
1446 MODULE_AUTHOR("Xilinx, Inc.");
1447