xref: /openbmc/linux/drivers/iio/adc/xilinx-ams.c (revision 359745d7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx AMS driver
4  *
5  *  Copyright (C) 2021 Xilinx, Inc.
6  *
7  *  Manish Narani <mnarani@xilinx.com>
8  *  Rajnikant Bhojani <rajnikant.bhojani@xilinx.com>
9  */
10 
11 #include <linux/bits.h>
12 #include <linux/bitfield.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/iopoll.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/mod_devicetable.h>
21 #include <linux/overflow.h>
22 #include <linux/platform_device.h>
23 #include <linux/property.h>
24 #include <linux/slab.h>
25 
26 #include <linux/iio/events.h>
27 #include <linux/iio/iio.h>
28 
29 /* AMS registers definitions */
30 #define AMS_ISR_0			0x010
31 #define AMS_ISR_1			0x014
32 #define AMS_IER_0			0x020
33 #define AMS_IER_1			0x024
34 #define AMS_IDR_0			0x028
35 #define AMS_IDR_1			0x02C
36 #define AMS_PS_CSTS			0x040
37 #define AMS_PL_CSTS			0x044
38 
39 #define AMS_VCC_PSPLL0			0x060
40 #define AMS_VCC_PSPLL3			0x06C
41 #define AMS_VCCINT			0x078
42 #define AMS_VCCBRAM			0x07C
43 #define AMS_VCCAUX			0x080
44 #define AMS_PSDDRPLL			0x084
45 #define AMS_PSINTFPDDR			0x09C
46 
47 #define AMS_VCC_PSPLL0_CH		48
48 #define AMS_VCC_PSPLL3_CH		51
49 #define AMS_VCCINT_CH			54
50 #define AMS_VCCBRAM_CH			55
51 #define AMS_VCCAUX_CH			56
52 #define AMS_PSDDRPLL_CH			57
53 #define AMS_PSINTFPDDR_CH		63
54 
55 #define AMS_REG_CONFIG0			0x100
56 #define AMS_REG_CONFIG1			0x104
57 #define AMS_REG_CONFIG3			0x10C
58 #define AMS_REG_CONFIG4			0x110
59 #define AMS_REG_SEQ_CH0			0x120
60 #define AMS_REG_SEQ_CH1			0x124
61 #define AMS_REG_SEQ_CH2			0x118
62 
63 #define AMS_VUSER0_MASK			BIT(0)
64 #define AMS_VUSER1_MASK			BIT(1)
65 #define AMS_VUSER2_MASK			BIT(2)
66 #define AMS_VUSER3_MASK			BIT(3)
67 
68 #define AMS_TEMP			0x000
69 #define AMS_SUPPLY1			0x004
70 #define AMS_SUPPLY2			0x008
71 #define AMS_VP_VN			0x00C
72 #define AMS_VREFP			0x010
73 #define AMS_VREFN			0x014
74 #define AMS_SUPPLY3			0x018
75 #define AMS_SUPPLY4			0x034
76 #define AMS_SUPPLY5			0x038
77 #define AMS_SUPPLY6			0x03C
78 #define AMS_SUPPLY7			0x200
79 #define AMS_SUPPLY8			0x204
80 #define AMS_SUPPLY9			0x208
81 #define AMS_SUPPLY10			0x20C
82 #define AMS_VCCAMS			0x210
83 #define AMS_TEMP_REMOTE			0x214
84 
85 #define AMS_REG_VAUX(x)			(0x40 + 4 * (x))
86 
87 #define AMS_PS_RESET_VALUE		0xFFFF
88 #define AMS_PL_RESET_VALUE		0xFFFF
89 
90 #define AMS_CONF0_CHANNEL_NUM_MASK	GENMASK(6, 0)
91 
92 #define AMS_CONF1_SEQ_MASK		GENMASK(15, 12)
93 #define AMS_CONF1_SEQ_DEFAULT		FIELD_PREP(AMS_CONF1_SEQ_MASK, 0)
94 #define AMS_CONF1_SEQ_CONTINUOUS	FIELD_PREP(AMS_CONF1_SEQ_MASK, 1)
95 #define AMS_CONF1_SEQ_SINGLE_CHANNEL	FIELD_PREP(AMS_CONF1_SEQ_MASK, 2)
96 
97 #define AMS_REG_SEQ0_MASK		GENMASK(15, 0)
98 #define AMS_REG_SEQ2_MASK		GENMASK(21, 16)
99 #define AMS_REG_SEQ1_MASK		GENMASK_ULL(37, 22)
100 
101 #define AMS_PS_SEQ_MASK			GENMASK(21, 0)
102 #define AMS_PL_SEQ_MASK			GENMASK_ULL(59, 22)
103 
104 #define AMS_ALARM_TEMP			0x140
105 #define AMS_ALARM_SUPPLY1		0x144
106 #define AMS_ALARM_SUPPLY2		0x148
107 #define AMS_ALARM_SUPPLY3		0x160
108 #define AMS_ALARM_SUPPLY4		0x164
109 #define AMS_ALARM_SUPPLY5		0x168
110 #define AMS_ALARM_SUPPLY6		0x16C
111 #define AMS_ALARM_SUPPLY7		0x180
112 #define AMS_ALARM_SUPPLY8		0x184
113 #define AMS_ALARM_SUPPLY9		0x188
114 #define AMS_ALARM_SUPPLY10		0x18C
115 #define AMS_ALARM_VCCAMS		0x190
116 #define AMS_ALARM_TEMP_REMOTE		0x194
117 #define AMS_ALARM_THRESHOLD_OFF_10	0x10
118 #define AMS_ALARM_THRESHOLD_OFF_20	0x20
119 
120 #define AMS_ALARM_THR_DIRECT_MASK	BIT(1)
121 #define AMS_ALARM_THR_MIN		0x0000
122 #define AMS_ALARM_THR_MAX		(BIT(16) - 1)
123 
124 #define AMS_ALARM_MASK			GENMASK_ULL(63, 0)
125 #define AMS_NO_OF_ALARMS		32
126 #define AMS_PL_ALARM_START		16
127 #define AMS_PL_ALARM_MASK		GENMASK(31, 16)
128 #define AMS_ISR0_ALARM_MASK		GENMASK(31, 0)
129 #define AMS_ISR1_ALARM_MASK		(GENMASK(31, 29) | GENMASK(4, 0))
130 #define AMS_ISR1_EOC_MASK		BIT(3)
131 #define AMS_ISR1_INTR_MASK		GENMASK_ULL(63, 32)
132 #define AMS_ISR0_ALARM_2_TO_0_MASK	GENMASK(2, 0)
133 #define AMS_ISR0_ALARM_6_TO_3_MASK	GENMASK(6, 3)
134 #define AMS_ISR0_ALARM_12_TO_7_MASK	GENMASK(13, 8)
135 #define AMS_CONF1_ALARM_2_TO_0_MASK	GENMASK(3, 1)
136 #define AMS_CONF1_ALARM_6_TO_3_MASK	GENMASK(11, 8)
137 #define AMS_CONF1_ALARM_12_TO_7_MASK	GENMASK(5, 0)
138 #define AMS_REGCFG1_ALARM_MASK  \
139 	(AMS_CONF1_ALARM_2_TO_0_MASK | AMS_CONF1_ALARM_6_TO_3_MASK | BIT(0))
140 #define AMS_REGCFG3_ALARM_MASK		AMS_CONF1_ALARM_12_TO_7_MASK
141 
142 #define AMS_PS_CSTS_PS_READY		(BIT(27) | BIT(16))
143 #define AMS_PL_CSTS_ACCESS_MASK		BIT(1)
144 
145 #define AMS_PL_MAX_FIXED_CHANNEL	10
146 #define AMS_PL_MAX_EXT_CHANNEL		20
147 
148 #define AMS_INIT_POLL_TIME_US		200
149 #define AMS_INIT_TIMEOUT_US		10000
150 #define AMS_UNMASK_TIMEOUT_MS		500
151 
152 /*
153  * The following scale and offset values are derived from
154  * UG580 (v1.7), December 20, 2016.
155  */
156 #define AMS_SUPPLY_SCALE_1VOLT_mV		1000
157 #define AMS_SUPPLY_SCALE_3VOLT_mV		3000
158 #define AMS_SUPPLY_SCALE_6VOLT_mV		6000
159 #define AMS_SUPPLY_SCALE_DIV_BIT	16
160 
161 #define AMS_TEMP_SCALE			509314
162 #define AMS_TEMP_SCALE_DIV_BIT		16
163 #define AMS_TEMP_OFFSET			-((280230LL << 16) / 509314)
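/*
 * With the IIO convention value = (raw + offset) * scale, the two constants
 * above give, in millidegrees Celsius: temp = raw * 509314 / 2^16 - 280230,
 * which matches the UG580 transfer function
 * Temp(C) = (ADC code * 509.314) / 2^16 - 280.23 (approximately).
 */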
164 
165 enum ams_alarm_bit {
166 	AMS_ALARM_BIT_TEMP = 0,
167 	AMS_ALARM_BIT_SUPPLY1 = 1,
168 	AMS_ALARM_BIT_SUPPLY2 = 2,
169 	AMS_ALARM_BIT_SUPPLY3 = 3,
170 	AMS_ALARM_BIT_SUPPLY4 = 4,
171 	AMS_ALARM_BIT_SUPPLY5 = 5,
172 	AMS_ALARM_BIT_SUPPLY6 = 6,
173 	AMS_ALARM_BIT_RESERVED = 7,
174 	AMS_ALARM_BIT_SUPPLY7 = 8,
175 	AMS_ALARM_BIT_SUPPLY8 = 9,
176 	AMS_ALARM_BIT_SUPPLY9 = 10,
177 	AMS_ALARM_BIT_SUPPLY10 = 11,
178 	AMS_ALARM_BIT_VCCAMS = 12,
179 	AMS_ALARM_BIT_TEMP_REMOTE = 13,
180 };
181 
182 enum ams_seq {
183 	AMS_SEQ_VCC_PSPLL = 0,
184 	AMS_SEQ_VCC_PSBATT = 1,
185 	AMS_SEQ_VCCINT = 2,
186 	AMS_SEQ_VCCBRAM = 3,
187 	AMS_SEQ_VCCAUX = 4,
188 	AMS_SEQ_PSDDRPLL = 5,
189 	AMS_SEQ_INTDDR = 6,
190 };
191 
192 enum ams_ps_pl_seq {
193 	AMS_SEQ_CALIB = 0,
194 	AMS_SEQ_RSVD_1 = 1,
195 	AMS_SEQ_RSVD_2 = 2,
196 	AMS_SEQ_TEST = 3,
197 	AMS_SEQ_RSVD_4 = 4,
198 	AMS_SEQ_SUPPLY4 = 5,
199 	AMS_SEQ_SUPPLY5 = 6,
200 	AMS_SEQ_SUPPLY6 = 7,
201 	AMS_SEQ_TEMP = 8,
202 	AMS_SEQ_SUPPLY2 = 9,
203 	AMS_SEQ_SUPPLY1 = 10,
204 	AMS_SEQ_VP_VN = 11,
205 	AMS_SEQ_VREFP = 12,
206 	AMS_SEQ_VREFN = 13,
207 	AMS_SEQ_SUPPLY3 = 14,
208 	AMS_SEQ_CURRENT_MON = 15,
209 	AMS_SEQ_SUPPLY7 = 16,
210 	AMS_SEQ_SUPPLY8 = 17,
211 	AMS_SEQ_SUPPLY9 = 18,
212 	AMS_SEQ_SUPPLY10 = 19,
213 	AMS_SEQ_VCCAMS = 20,
214 	AMS_SEQ_TEMP_REMOTE = 21,
215 	AMS_SEQ_MAX = 22
216 };
217 
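/*
 * scan_index layout: PS channels use indices 0 .. AMS_PS_SEQ_MAX - 1, PL
 * channels are offset by AMS_PS_SEQ_MAX via PL_SEQ(), and the control
 * channels (VCC_PSPLL etc.) start at AMS_CTRL_SEQ_BASE.
 */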
218 #define AMS_PS_SEQ_MAX		AMS_SEQ_MAX
219 #define AMS_SEQ(x)		(AMS_SEQ_MAX + (x))
220 #define PS_SEQ(x)		(x)
221 #define PL_SEQ(x)		(AMS_PS_SEQ_MAX + (x))
222 #define AMS_CTRL_SEQ_BASE	(AMS_PS_SEQ_MAX * 3)
223 
224 #define AMS_CHAN_TEMP(_scan_index, _addr) { \
225 	.type = IIO_TEMP, \
226 	.indexed = 1, \
227 	.address = (_addr), \
228 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
229 		BIT(IIO_CHAN_INFO_SCALE) | \
230 		BIT(IIO_CHAN_INFO_OFFSET), \
231 	.event_spec = ams_temp_events, \
232 	.scan_index = _scan_index, \
233 	.num_event_specs = ARRAY_SIZE(ams_temp_events), \
234 }
235 
236 #define AMS_CHAN_VOLTAGE(_scan_index, _addr, _alarm) { \
237 	.type = IIO_VOLTAGE, \
238 	.indexed = 1, \
239 	.address = (_addr), \
240 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
241 		BIT(IIO_CHAN_INFO_SCALE), \
242 	.event_spec = (_alarm) ? ams_voltage_events : NULL, \
243 	.scan_index = _scan_index, \
244 	.num_event_specs = (_alarm) ? ARRAY_SIZE(ams_voltage_events) : 0, \
245 }
246 
247 #define AMS_PS_CHAN_TEMP(_scan_index, _addr) \
248 	AMS_CHAN_TEMP(PS_SEQ(_scan_index), _addr)
249 #define AMS_PS_CHAN_VOLTAGE(_scan_index, _addr) \
250 	AMS_CHAN_VOLTAGE(PS_SEQ(_scan_index), _addr, true)
251 
252 #define AMS_PL_CHAN_TEMP(_scan_index, _addr) \
253 	AMS_CHAN_TEMP(PL_SEQ(_scan_index), _addr)
254 #define AMS_PL_CHAN_VOLTAGE(_scan_index, _addr, _alarm) \
255 	AMS_CHAN_VOLTAGE(PL_SEQ(_scan_index), _addr, _alarm)
256 #define AMS_PL_AUX_CHAN_VOLTAGE(_auxno) \
257 	AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(_auxno)), AMS_REG_VAUX(_auxno), false)
258 #define AMS_CTRL_CHAN_VOLTAGE(_scan_index, _addr) \
259 	AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(AMS_SEQ(_scan_index))), _addr, false)
260 
261 /**
262  * struct ams - Driver state for the Xilinx AMS
263  * @base: mapped base address of the AMS control block
264  * @ps_base: mapped base address of the PS sysmon block
265  * @pl_base: mapped base address of the PL sysmon block
266  * @clk: AMS reference clock
267  * @dev: pointer to the device struct
268  * @lock: serializes register access from concurrent user operations
269  * @intr_lock: protects the interrupt mask state
270  * @alarm_mask: software view of the enabled alarms
271  * @current_masked_alarm: alarms temporarily masked while their condition persists
272  * @intr_mask: interrupts currently masked (disabled)
273  * @ams_unmask_work: delayed work that re-enables an alarm interrupt once the
274  *		     alarm condition has cleared
275  */
276 struct ams {
277 	void __iomem *base;
278 	void __iomem *ps_base;
279 	void __iomem *pl_base;
280 	struct clk *clk;
281 	struct device *dev;
282 	struct mutex lock;
283 	spinlock_t intr_lock;
284 	unsigned int alarm_mask;
285 	unsigned int current_masked_alarm;
286 	u64 intr_mask;
287 	struct delayed_work ams_unmask_work;
288 };
289 
290 static inline void ams_ps_update_reg(struct ams *ams, unsigned int offset,
291 				     u32 mask, u32 data)
292 {
293 	u32 val, regval;
294 
295 	val = readl(ams->ps_base + offset);
296 	regval = (val & ~mask) | (data & mask);
297 	writel(regval, ams->ps_base + offset);
298 }
299 
300 static inline void ams_pl_update_reg(struct ams *ams, unsigned int offset,
301 				     u32 mask, u32 data)
302 {
303 	u32 val, regval;
304 
305 	val = readl(ams->pl_base + offset);
306 	regval = (val & ~mask) | (data & mask);
307 	writel(regval, ams->pl_base + offset);
308 }
309 
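/*
 * Mirror the software interrupt mask to the hardware: bits set in intr_mask
 * or current_masked_alarm are disabled via the IDR registers, everything else
 * is enabled via the IER registers. Except during initial setup, callers hold
 * ams->intr_lock.
 */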
310 static void ams_update_intrmask(struct ams *ams, u64 mask, u64 val)
311 {
312 	u32 regval;
313 
314 	ams->intr_mask = (ams->intr_mask & ~mask) | (val & mask);
315 
316 	regval = ~(ams->intr_mask | ams->current_masked_alarm);
317 	writel(regval, ams->base + AMS_IER_0);
318 
319 	regval = ~(FIELD_GET(AMS_ISR1_INTR_MASK, ams->intr_mask));
320 	writel(regval, ams->base + AMS_IER_1);
321 
322 	regval = ams->intr_mask | ams->current_masked_alarm;
323 	writel(regval, ams->base + AMS_IDR_0);
324 
325 	regval = FIELD_GET(AMS_ISR1_INTR_MASK, ams->intr_mask);
326 	writel(regval, ams->base + AMS_IDR_1);
327 }
328 
329 static void ams_disable_all_alarms(struct ams *ams)
330 {
331 	/* disable PS module alarm */
332 	if (ams->ps_base) {
333 		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
334 				  AMS_REGCFG1_ALARM_MASK);
335 		ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
336 				  AMS_REGCFG3_ALARM_MASK);
337 	}
338 
339 	/* disable PL module alarm */
340 	if (ams->pl_base) {
341 		ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
342 				  AMS_REGCFG1_ALARM_MASK);
343 		ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
344 				  AMS_REGCFG3_ALARM_MASK);
345 	}
346 }
347 
348 static void ams_update_ps_alarm(struct ams *ams, unsigned long alarm_mask)
349 {
350 	u32 cfg;
351 	u32 val;
352 
353 	val = FIELD_GET(AMS_ISR0_ALARM_2_TO_0_MASK, alarm_mask);
354 	cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_2_TO_0_MASK, val));
355 
356 	val = FIELD_GET(AMS_ISR0_ALARM_6_TO_3_MASK, alarm_mask);
357 	cfg &= ~(FIELD_PREP(AMS_CONF1_ALARM_6_TO_3_MASK, val));
358 
359 	ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, cfg);
360 
361 	val = FIELD_GET(AMS_ISR0_ALARM_12_TO_7_MASK, alarm_mask);
362 	cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_12_TO_7_MASK, val));
363 	ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
364 }
365 
366 static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask)
367 {
368 	unsigned long pl_alarm_mask;
369 	u32 cfg;
370 	u32 val;
371 
372 	pl_alarm_mask = FIELD_GET(AMS_PL_ALARM_MASK, alarm_mask);
373 
374 	val = FIELD_GET(AMS_ISR0_ALARM_2_TO_0_MASK, pl_alarm_mask);
375 	cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_2_TO_0_MASK, val));
376 
377 	val = FIELD_GET(AMS_ISR0_ALARM_6_TO_3_MASK, pl_alarm_mask);
378 	cfg &= ~(FIELD_PREP(AMS_CONF1_ALARM_6_TO_3_MASK, val));
379 
380 	ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, cfg);
381 
382 	val = FIELD_GET(AMS_ISR0_ALARM_12_TO_7_MASK, pl_alarm_mask);
383 	cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_12_TO_7_MASK, val));
384 	ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
385 }
386 
387 static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
388 {
389 	unsigned long flags;
390 
391 	if (ams->ps_base)
392 		ams_update_ps_alarm(ams, alarm_mask);
393 
394 	if (ams->pl_base)
395 		ams_update_pl_alarm(ams, alarm_mask);
396 
397 	spin_lock_irqsave(&ams->intr_lock, flags);
398 	ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
399 	spin_unlock_irqrestore(&ams->intr_lock, flags);
400 }
401 
402 static void ams_enable_channel_sequence(struct iio_dev *indio_dev)
403 {
404 	struct ams *ams = iio_priv(indio_dev);
405 	unsigned long long scan_mask;
406 	int i;
407 	u32 regval;
408 
409 	/*
410 	 * Enable channel sequence. First 22 bits of scan_mask represent
411 	 * PS channels, and the remaining bits represent PL channels.
412 	 */
413 
414 	/* Run calibration of PS & PL as part of the sequence */
415 	scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX);
416 	for (i = 0; i < indio_dev->num_channels; i++)
417 		scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index);
418 
419 	if (ams->ps_base) {
420 		/* put sysmon in a soft reset to change the sequence */
421 		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
422 				  AMS_CONF1_SEQ_DEFAULT);
423 
424 		/* configure basic channels */
425 		regval = FIELD_GET(AMS_REG_SEQ0_MASK, scan_mask);
426 		writel(regval, ams->ps_base + AMS_REG_SEQ_CH0);
427 
428 		regval = FIELD_GET(AMS_REG_SEQ2_MASK, scan_mask);
429 		writel(regval, ams->ps_base + AMS_REG_SEQ_CH2);
430 
431 		/* set continuous sequence mode */
432 		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
433 				  AMS_CONF1_SEQ_CONTINUOUS);
434 	}
435 
436 	if (ams->pl_base) {
437 		/* put sysmon in a soft reset to change the sequence */
438 		ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
439 				  AMS_CONF1_SEQ_DEFAULT);
440 
441 		/* configure basic channels */
442 		scan_mask = FIELD_GET(AMS_PL_SEQ_MASK, scan_mask);
443 
444 		regval = FIELD_GET(AMS_REG_SEQ0_MASK, scan_mask);
445 		writel(regval, ams->pl_base + AMS_REG_SEQ_CH0);
446 
447 		regval = FIELD_GET(AMS_REG_SEQ1_MASK, scan_mask);
448 		writel(regval, ams->pl_base + AMS_REG_SEQ_CH1);
449 
450 		regval = FIELD_GET(AMS_REG_SEQ2_MASK, scan_mask);
451 		writel(regval, ams->pl_base + AMS_REG_SEQ_CH2);
452 
453 		/* set continuous sequence mode */
454 		ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
455 				  AMS_CONF1_SEQ_CONTINUOUS);
456 	}
457 }
458 
459 static int ams_init_device(struct ams *ams)
460 {
461 	u32 expect = AMS_PS_CSTS_PS_READY;
462 	u32 reg, value;
463 	int ret;
464 
465 	/* reset AMS */
466 	if (ams->ps_base) {
467 		writel(AMS_PS_RESET_VALUE, ams->ps_base + AMS_VP_VN);
468 
469 		ret = readl_poll_timeout(ams->base + AMS_PS_CSTS, reg, (reg & expect),
470 					 AMS_INIT_POLL_TIME_US, AMS_INIT_TIMEOUT_US);
471 		if (ret)
472 			return ret;
473 
474 		/* put sysmon in a default state */
475 		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
476 				  AMS_CONF1_SEQ_DEFAULT);
477 	}
478 
479 	if (ams->pl_base) {
480 		value = readl(ams->base + AMS_PL_CSTS);
481 		if (value == 0)
482 			return 0;
483 
484 		writel(AMS_PL_RESET_VALUE, ams->pl_base + AMS_VP_VN);
485 
486 		/* put sysmon in a default state */
487 		ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
488 				  AMS_CONF1_SEQ_DEFAULT);
489 	}
490 
491 	ams_disable_all_alarms(ams);
492 
493 	/* Disable interrupt */
494 	ams_update_intrmask(ams, AMS_ALARM_MASK, AMS_ALARM_MASK);
495 
496 	/* Clear any pending interrupt */
497 	writel(AMS_ISR0_ALARM_MASK, ams->base + AMS_ISR_0);
498 	writel(AMS_ISR1_ALARM_MASK, ams->base + AMS_ISR_1);
499 
500 	return 0;
501 }
502 
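/*
 * Switch the PS sysmon to single-channel mode so that one of the "control"
 * supplies (VCC_PSPLL, VCCINT, ...) can be sampled on demand. The caller is
 * expected to restore the continuous sequence afterwards, see
 * ams_enable_channel_sequence() in ams_read_raw().
 */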
503 static int ams_enable_single_channel(struct ams *ams, unsigned int offset)
504 {
505 	u8 channel_num;
506 
507 	switch (offset) {
508 	case AMS_VCC_PSPLL0:
509 		channel_num = AMS_VCC_PSPLL0_CH;
510 		break;
511 	case AMS_VCC_PSPLL3:
512 		channel_num = AMS_VCC_PSPLL3_CH;
513 		break;
514 	case AMS_VCCINT:
515 		channel_num = AMS_VCCINT_CH;
516 		break;
517 	case AMS_VCCBRAM:
518 		channel_num = AMS_VCCBRAM_CH;
519 		break;
520 	case AMS_VCCAUX:
521 		channel_num = AMS_VCCAUX_CH;
522 		break;
523 	case AMS_PSDDRPLL:
524 		channel_num = AMS_PSDDRPLL_CH;
525 		break;
526 	case AMS_PSINTFPDDR:
527 		channel_num = AMS_PSINTFPDDR_CH;
528 		break;
529 	default:
530 		return -EINVAL;
531 	}
532 
533 	/* set single channel, sequencer off mode */
534 	ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
535 			  AMS_CONF1_SEQ_SINGLE_CHANNEL);
536 
537 	/* write the channel number */
538 	ams_ps_update_reg(ams, AMS_REG_CONFIG0, AMS_CONF0_CHANNEL_NUM_MASK,
539 			  channel_num);
540 
541 	return 0;
542 }
543 
544 static int ams_read_vcc_reg(struct ams *ams, unsigned int offset, u32 *data)
545 {
546 	u32 expect = AMS_ISR1_EOC_MASK;
547 	u32 reg;
548 	int ret;
549 
550 	ret = ams_enable_single_channel(ams, offset);
551 	if (ret)
552 		return ret;
553 
554 	ret = readl_poll_timeout(ams->base + AMS_ISR_1, reg, (reg & expect),
555 				 AMS_INIT_POLL_TIME_US, AMS_INIT_TIMEOUT_US);
556 	if (ret)
557 		return ret;
558 
559 	*data = readl(ams->base + offset);
560 
561 	return 0;
562 }
563 
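/*
 * The scale helpers below return the full-scale voltage in mV; combined with
 * AMS_SUPPLY_SCALE_DIV_BIT and IIO_VAL_FRACTIONAL_LOG2 the reported voltage
 * is raw * full_scale_mV / 2^16.
 */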
564 static int ams_get_ps_scale(int address)
565 {
566 	int val;
567 
568 	switch (address) {
569 	case AMS_SUPPLY1:
570 	case AMS_SUPPLY2:
571 	case AMS_SUPPLY3:
572 	case AMS_SUPPLY4:
573 	case AMS_SUPPLY9:
574 	case AMS_SUPPLY10:
575 	case AMS_VCCAMS:
576 		val = AMS_SUPPLY_SCALE_3VOLT_mV;
577 		break;
578 	case AMS_SUPPLY5:
579 	case AMS_SUPPLY6:
580 	case AMS_SUPPLY7:
581 	case AMS_SUPPLY8:
582 		val = AMS_SUPPLY_SCALE_6VOLT_mV;
583 		break;
584 	default:
585 		val = AMS_SUPPLY_SCALE_1VOLT_mV;
586 		break;
587 	}
588 
589 	return val;
590 }
591 
592 static int ams_get_pl_scale(struct ams *ams, int address)
593 {
594 	int val, regval;
595 
596 	switch (address) {
597 	case AMS_SUPPLY1:
598 	case AMS_SUPPLY2:
599 	case AMS_SUPPLY3:
600 	case AMS_SUPPLY4:
601 	case AMS_SUPPLY5:
602 	case AMS_SUPPLY6:
603 	case AMS_VCCAMS:
604 	case AMS_VREFP:
605 	case AMS_VREFN:
606 		val = AMS_SUPPLY_SCALE_3VOLT_mV;
607 		break;
608 	case AMS_SUPPLY7:
609 		regval = readl(ams->pl_base + AMS_REG_CONFIG4);
610 		if (FIELD_GET(AMS_VUSER0_MASK, regval))
611 			val = AMS_SUPPLY_SCALE_6VOLT_mV;
612 		else
613 			val = AMS_SUPPLY_SCALE_3VOLT_mV;
614 		break;
615 	case AMS_SUPPLY8:
616 		regval = readl(ams->pl_base + AMS_REG_CONFIG4);
617 		if (FIELD_GET(AMS_VUSER1_MASK, regval))
618 			val = AMS_SUPPLY_SCALE_6VOLT_mV;
619 		else
620 			val = AMS_SUPPLY_SCALE_3VOLT_mV;
621 		break;
622 	case AMS_SUPPLY9:
623 		regval = readl(ams->pl_base + AMS_REG_CONFIG4);
624 		if (FIELD_GET(AMS_VUSER2_MASK, regval))
625 			val = AMS_SUPPLY_SCALE_6VOLT_mV;
626 		else
627 			val = AMS_SUPPLY_SCALE_3VOLT_mV;
628 		break;
629 	case AMS_SUPPLY10:
630 		regval = readl(ams->pl_base + AMS_REG_CONFIG4);
631 		if (FIELD_GET(AMS_VUSER3_MASK, regval))
632 			val = AMS_SUPPLY_SCALE_6VOLT_mV;
633 		else
634 			val = AMS_SUPPLY_SCALE_3VOLT_mV;
635 		break;
636 	case AMS_VP_VN:
637 	case AMS_REG_VAUX(0) ... AMS_REG_VAUX(15):
638 		val = AMS_SUPPLY_SCALE_1VOLT_mV;
639 		break;
640 	default:
641 		val = AMS_SUPPLY_SCALE_1VOLT_mV;
642 		break;
643 	}
644 
645 	return val;
646 }
647 
648 static int ams_get_ctrl_scale(int address)
649 {
650 	int val;
651 
652 	switch (address) {
653 	case AMS_VCC_PSPLL0:
654 	case AMS_VCC_PSPLL3:
655 	case AMS_VCCINT:
656 	case AMS_VCCBRAM:
657 	case AMS_VCCAUX:
658 	case AMS_PSDDRPLL:
659 	case AMS_PSINTFPDDR:
660 		val = AMS_SUPPLY_SCALE_3VOLT_mV;
661 		break;
662 	default:
663 		val = AMS_SUPPLY_SCALE_1VOLT_mV;
664 		break;
665 	}
666 
667 	return val;
668 }
669 
670 static int ams_read_raw(struct iio_dev *indio_dev,
671 			struct iio_chan_spec const *chan,
672 			int *val, int *val2, long mask)
673 {
674 	struct ams *ams = iio_priv(indio_dev);
675 	int ret;
676 
677 	switch (mask) {
678 	case IIO_CHAN_INFO_RAW:
679 		mutex_lock(&ams->lock);
680 		if (chan->scan_index >= AMS_CTRL_SEQ_BASE) {
681 			ret = ams_read_vcc_reg(ams, chan->address, val);
682 			if (ret)
683 				goto unlock_mutex;
684 			ams_enable_channel_sequence(indio_dev);
685 		} else if (chan->scan_index >= AMS_PS_SEQ_MAX)
686 			*val = readl(ams->pl_base + chan->address);
687 		else
688 			*val = readl(ams->ps_base + chan->address);
689 
690 		ret = IIO_VAL_INT;
691 unlock_mutex:
692 		mutex_unlock(&ams->lock);
693 		return ret;
694 	case IIO_CHAN_INFO_SCALE:
695 		switch (chan->type) {
696 		case IIO_VOLTAGE:
697 			if (chan->scan_index < AMS_PS_SEQ_MAX)
698 				*val = ams_get_ps_scale(chan->address);
699 			else if (chan->scan_index >= AMS_PS_SEQ_MAX &&
700 				 chan->scan_index < AMS_CTRL_SEQ_BASE)
701 				*val = ams_get_pl_scale(ams, chan->address);
702 			else
703 				*val = ams_get_ctrl_scale(chan->address);
704 
705 			*val2 = AMS_SUPPLY_SCALE_DIV_BIT;
706 			return IIO_VAL_FRACTIONAL_LOG2;
707 		case IIO_TEMP:
708 			*val = AMS_TEMP_SCALE;
709 			*val2 = AMS_TEMP_SCALE_DIV_BIT;
710 			return IIO_VAL_FRACTIONAL_LOG2;
711 		default:
712 			return -EINVAL;
713 		}
714 	case IIO_CHAN_INFO_OFFSET:
715 		/* Only the temperature channel has an offset */
716 		*val = AMS_TEMP_OFFSET;
717 		return IIO_VAL_INT;
718 	default:
719 		return -EINVAL;
720 	}
721 }
722 
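/*
 * Each alarm has its rising (upper) threshold register at the base offset
 * returned below; the falling (lower) threshold register sits 0x10 bytes
 * further (0x20 for SUPPLY7 and above).
 */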
723 static int ams_get_alarm_offset(int scan_index, enum iio_event_direction dir)
724 {
725 	int offset;
726 
727 	if (scan_index >= AMS_PS_SEQ_MAX)
728 		scan_index -= AMS_PS_SEQ_MAX;
729 
730 	if (dir == IIO_EV_DIR_FALLING) {
731 		if (scan_index < AMS_SEQ_SUPPLY7)
732 			offset = AMS_ALARM_THRESHOLD_OFF_10;
733 		else
734 			offset = AMS_ALARM_THRESHOLD_OFF_20;
735 	} else {
736 		offset = 0;
737 	}
738 
739 	switch (scan_index) {
740 	case AMS_SEQ_TEMP:
741 		return AMS_ALARM_TEMP + offset;
742 	case AMS_SEQ_SUPPLY1:
743 		return AMS_ALARM_SUPPLY1 + offset;
744 	case AMS_SEQ_SUPPLY2:
745 		return AMS_ALARM_SUPPLY2 + offset;
746 	case AMS_SEQ_SUPPLY3:
747 		return AMS_ALARM_SUPPLY3 + offset;
748 	case AMS_SEQ_SUPPLY4:
749 		return AMS_ALARM_SUPPLY4 + offset;
750 	case AMS_SEQ_SUPPLY5:
751 		return AMS_ALARM_SUPPLY5 + offset;
752 	case AMS_SEQ_SUPPLY6:
753 		return AMS_ALARM_SUPPLY6 + offset;
754 	case AMS_SEQ_SUPPLY7:
755 		return AMS_ALARM_SUPPLY7 + offset;
756 	case AMS_SEQ_SUPPLY8:
757 		return AMS_ALARM_SUPPLY8 + offset;
758 	case AMS_SEQ_SUPPLY9:
759 		return AMS_ALARM_SUPPLY9 + offset;
760 	case AMS_SEQ_SUPPLY10:
761 		return AMS_ALARM_SUPPLY10 + offset;
762 	case AMS_SEQ_VCCAMS:
763 		return AMS_ALARM_VCCAMS + offset;
764 	case AMS_SEQ_TEMP_REMOTE:
765 		return AMS_ALARM_TEMP_REMOTE + offset;
766 	default:
767 		return 0;
768 	}
769 }
770 
771 static const struct iio_chan_spec *ams_event_to_channel(struct iio_dev *dev,
772 							u32 event)
773 {
774 	int scan_index = 0, i;
775 
776 	if (event >= AMS_PL_ALARM_START) {
777 		event -= AMS_PL_ALARM_START;
778 		scan_index = AMS_PS_SEQ_MAX;
779 	}
780 
781 	switch (event) {
782 	case AMS_ALARM_BIT_TEMP:
783 		scan_index += AMS_SEQ_TEMP;
784 		break;
785 	case AMS_ALARM_BIT_SUPPLY1:
786 		scan_index += AMS_SEQ_SUPPLY1;
787 		break;
788 	case AMS_ALARM_BIT_SUPPLY2:
789 		scan_index += AMS_SEQ_SUPPLY2;
790 		break;
791 	case AMS_ALARM_BIT_SUPPLY3:
792 		scan_index += AMS_SEQ_SUPPLY3;
793 		break;
794 	case AMS_ALARM_BIT_SUPPLY4:
795 		scan_index += AMS_SEQ_SUPPLY4;
796 		break;
797 	case AMS_ALARM_BIT_SUPPLY5:
798 		scan_index += AMS_SEQ_SUPPLY5;
799 		break;
800 	case AMS_ALARM_BIT_SUPPLY6:
801 		scan_index += AMS_SEQ_SUPPLY6;
802 		break;
803 	case AMS_ALARM_BIT_SUPPLY7:
804 		scan_index += AMS_SEQ_SUPPLY7;
805 		break;
806 	case AMS_ALARM_BIT_SUPPLY8:
807 		scan_index += AMS_SEQ_SUPPLY8;
808 		break;
809 	case AMS_ALARM_BIT_SUPPLY9:
810 		scan_index += AMS_SEQ_SUPPLY9;
811 		break;
812 	case AMS_ALARM_BIT_SUPPLY10:
813 		scan_index += AMS_SEQ_SUPPLY10;
814 		break;
815 	case AMS_ALARM_BIT_VCCAMS:
816 		scan_index += AMS_SEQ_VCCAMS;
817 		break;
818 	case AMS_ALARM_BIT_TEMP_REMOTE:
819 		scan_index += AMS_SEQ_TEMP_REMOTE;
820 		break;
821 	default:
822 		break;
823 	}
824 
825 	for (i = 0; i < dev->num_channels; i++)
826 		if (dev->channels[i].scan_index == scan_index)
827 			break;
828 
829 	return &dev->channels[i];
830 }
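/*
 * Translate a channel's scan_index into its alarm bit in the ISR0 layout:
 * PS alarms occupy the lower 16 bits, PL alarms start at AMS_PL_ALARM_START.
 */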
831 
832 static int ams_get_alarm_mask(int scan_index)
833 {
834 	int bit = 0;
835 
836 	if (scan_index >= AMS_PS_SEQ_MAX) {
837 		bit = AMS_PL_ALARM_START;
838 		scan_index -= AMS_PS_SEQ_MAX;
839 	}
840 
841 	switch (scan_index) {
842 	case AMS_SEQ_TEMP:
843 		return BIT(AMS_ALARM_BIT_TEMP + bit);
844 	case AMS_SEQ_SUPPLY1:
845 		return BIT(AMS_ALARM_BIT_SUPPLY1 + bit);
846 	case AMS_SEQ_SUPPLY2:
847 		return BIT(AMS_ALARM_BIT_SUPPLY2 + bit);
848 	case AMS_SEQ_SUPPLY3:
849 		return BIT(AMS_ALARM_BIT_SUPPLY3 + bit);
850 	case AMS_SEQ_SUPPLY4:
851 		return BIT(AMS_ALARM_BIT_SUPPLY4 + bit);
852 	case AMS_SEQ_SUPPLY5:
853 		return BIT(AMS_ALARM_BIT_SUPPLY5 + bit);
854 	case AMS_SEQ_SUPPLY6:
855 		return BIT(AMS_ALARM_BIT_SUPPLY6 + bit);
856 	case AMS_SEQ_SUPPLY7:
857 		return BIT(AMS_ALARM_BIT_SUPPLY7 + bit);
858 	case AMS_SEQ_SUPPLY8:
859 		return BIT(AMS_ALARM_BIT_SUPPLY8 + bit);
860 	case AMS_SEQ_SUPPLY9:
861 		return BIT(AMS_ALARM_BIT_SUPPLY9 + bit);
862 	case AMS_SEQ_SUPPLY10:
863 		return BIT(AMS_ALARM_BIT_SUPPLY10 + bit);
864 	case AMS_SEQ_VCCAMS:
865 		return BIT(AMS_ALARM_BIT_VCCAMS + bit);
866 	case AMS_SEQ_TEMP_REMOTE:
867 		return BIT(AMS_ALARM_BIT_TEMP_REMOTE + bit);
868 	default:
869 		return 0;
870 	}
871 }
872 
873 static int ams_read_event_config(struct iio_dev *indio_dev,
874 				 const struct iio_chan_spec *chan,
875 				 enum iio_event_type type,
876 				 enum iio_event_direction dir)
877 {
878 	struct ams *ams = iio_priv(indio_dev);
879 
880 	return !!(ams->alarm_mask & ams_get_alarm_mask(chan->scan_index));
881 }
882 
883 static int ams_write_event_config(struct iio_dev *indio_dev,
884 				  const struct iio_chan_spec *chan,
885 				  enum iio_event_type type,
886 				  enum iio_event_direction dir,
887 				  int state)
888 {
889 	struct ams *ams = iio_priv(indio_dev);
890 	unsigned int alarm;
891 
892 	alarm = ams_get_alarm_mask(chan->scan_index);
893 
894 	mutex_lock(&ams->lock);
895 
896 	if (state)
897 		ams->alarm_mask |= alarm;
898 	else
899 		ams->alarm_mask &= ~alarm;
900 
901 	ams_update_alarm(ams, ams->alarm_mask);
902 
903 	mutex_unlock(&ams->lock);
904 
905 	return 0;
906 }
907 
908 static int ams_read_event_value(struct iio_dev *indio_dev,
909 				const struct iio_chan_spec *chan,
910 				enum iio_event_type type,
911 				enum iio_event_direction dir,
912 				enum iio_event_info info, int *val, int *val2)
913 {
914 	struct ams *ams = iio_priv(indio_dev);
915 	unsigned int offset = ams_get_alarm_offset(chan->scan_index, dir);
916 
917 	mutex_lock(&ams->lock);
918 
919 	if (chan->scan_index >= AMS_PS_SEQ_MAX)
920 		*val = readl(ams->pl_base + offset);
921 	else
922 		*val = readl(ams->ps_base + offset);
923 
924 	mutex_unlock(&ams->lock);
925 
926 	return IIO_VAL_INT;
927 }
928 
929 static int ams_write_event_value(struct iio_dev *indio_dev,
930 				 const struct iio_chan_spec *chan,
931 				 enum iio_event_type type,
932 				 enum iio_event_direction dir,
933 				 enum iio_event_info info, int val, int val2)
934 {
935 	struct ams *ams = iio_priv(indio_dev);
936 	unsigned int offset;
937 
938 	mutex_lock(&ams->lock);
939 
940 	/* Set temperature channel threshold to direct threshold */
941 	if (chan->type == IIO_TEMP) {
942 		offset = ams_get_alarm_offset(chan->scan_index, IIO_EV_DIR_FALLING);
943 
944 		if (chan->scan_index >= AMS_PS_SEQ_MAX)
945 			ams_pl_update_reg(ams, offset,
946 					  AMS_ALARM_THR_DIRECT_MASK,
947 					  AMS_ALARM_THR_DIRECT_MASK);
948 		else
949 			ams_ps_update_reg(ams, offset,
950 					  AMS_ALARM_THR_DIRECT_MASK,
951 					  AMS_ALARM_THR_DIRECT_MASK);
952 	}
953 
954 	offset = ams_get_alarm_offset(chan->scan_index, dir);
955 	if (chan->scan_index >= AMS_PS_SEQ_MAX)
956 		writel(val, ams->pl_base + offset);
957 	else
958 		writel(val, ams->ps_base + offset);
959 
960 	mutex_unlock(&ams->lock);
961 
962 	return 0;
963 }
964 
965 static void ams_handle_event(struct iio_dev *indio_dev, u32 event)
966 {
967 	const struct iio_chan_spec *chan;
968 
969 	chan = ams_event_to_channel(indio_dev, event);
970 
971 	if (chan->type == IIO_TEMP) {
972 		/*
973 		 * The temperature channel only supports over-temperature
974 		 * events.
975 		 */
976 		iio_push_event(indio_dev,
977 			       IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
978 						    IIO_EV_TYPE_THRESH,
979 						    IIO_EV_DIR_RISING),
980 			       iio_get_time_ns(indio_dev));
981 	} else {
982 		/*
983 		 * For other channels we don't know whether it is an upper or
984 		 * lower threshold event. Userspace will have to check the
985 		 * channel value if it wants to know.
986 		 */
987 		iio_push_event(indio_dev,
988 			       IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
989 						    IIO_EV_TYPE_THRESH,
990 						    IIO_EV_DIR_EITHER),
991 			       iio_get_time_ns(indio_dev));
992 	}
993 }
994 
995 static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
996 {
997 	unsigned int bit;
998 
999 	for_each_set_bit(bit, &events, AMS_NO_OF_ALARMS)
1000 		ams_handle_event(indio_dev, bit);
1001 }
1002 
1003 /**
1004  * ams_unmask_worker - ams alarm interrupt unmask worker
1005  * @work: work to be done
1006  *
1007  * The ZynqMP threshold interrupts are level sensitive. Since we can't make the
1008  * threshold condition go away from within the interrupt handler, we would
1009  * re-enter the handler again and again for as long as the condition persists.
1010  * To work around this we mask all active threshold interrupts in the interrupt
1011  * handler and schedule this delayed work, which polls the interrupt status and
1012  * only unmasks an interrupt once it has become inactive again.
1013  */
1014 static void ams_unmask_worker(struct work_struct *work)
1015 {
1016 	struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
1017 	unsigned int status, unmask;
1018 
1019 	spin_lock_irq(&ams->intr_lock);
1020 
1021 	status = readl(ams->base + AMS_ISR_0);
1022 
1023 	/* Clear those bits which are not active anymore */
1024 	unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
1025 
1026 	/* Clear status of disabled alarm */
1027 	unmask |= ams->intr_mask;
1028 
1029 	ams->current_masked_alarm &= status;
1030 
1031 	/* Also clear those which are masked out anyway */
1032 	ams->current_masked_alarm &= ~ams->intr_mask;
1033 
1034 	/* Clear the interrupts before we unmask them */
1035 	writel(unmask, ams->base + AMS_ISR_0);
1036 
1037 	ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
1038 
1039 	spin_unlock_irq(&ams->intr_lock);
1040 
1041 	/* If still pending some alarm re-trigger the timer */
1042 	if (ams->current_masked_alarm)
1043 		schedule_delayed_work(&ams->ams_unmask_work,
1044 				      msecs_to_jiffies(AMS_UNMASK_TIMEOUT_MS));
1045 }
1046 
1047 static irqreturn_t ams_irq(int irq, void *data)
1048 {
1049 	struct iio_dev *indio_dev = data;
1050 	struct ams *ams = iio_priv(indio_dev);
1051 	u32 isr0;
1052 
1053 	spin_lock(&ams->intr_lock);
1054 
1055 	isr0 = readl(ams->base + AMS_ISR_0);
1056 
1057 	/* Only process alarms that are not masked */
1058 	isr0 &= ~((ams->intr_mask & AMS_ISR0_ALARM_MASK) | ams->current_masked_alarm);
1059 	if (!isr0) {
1060 		spin_unlock(&ams->intr_lock);
1061 		return IRQ_NONE;
1062 	}
1063 
1064 	/* Clear interrupt */
1065 	writel(isr0, ams->base + AMS_ISR_0);
1066 
1067 	/* Mask the alarm interrupts until cleared */
1068 	ams->current_masked_alarm |= isr0;
1069 	ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
1070 
1071 	ams_handle_events(indio_dev, isr0);
1072 
1073 	schedule_delayed_work(&ams->ams_unmask_work,
1074 			      msecs_to_jiffies(AMS_UNMASK_TIMEOUT_MS));
1075 
1076 	spin_unlock(&ams->intr_lock);
1077 
1078 	return IRQ_HANDLED;
1079 }
1080 
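/*
 * The temperature alarm only reports an over-temperature (rising) event with
 * its own enable; voltage alarms have separate rising and falling threshold
 * values but share a single enable, hence the IIO_EV_DIR_EITHER enable entry.
 */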
1081 static const struct iio_event_spec ams_temp_events[] = {
1082 	{
1083 		.type = IIO_EV_TYPE_THRESH,
1084 		.dir = IIO_EV_DIR_RISING,
1085 		.mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE),
1086 	},
1087 };
1088 
1089 static const struct iio_event_spec ams_voltage_events[] = {
1090 	{
1091 		.type = IIO_EV_TYPE_THRESH,
1092 		.dir = IIO_EV_DIR_RISING,
1093 		.mask_separate = BIT(IIO_EV_INFO_VALUE),
1094 	},
1095 	{
1096 		.type = IIO_EV_TYPE_THRESH,
1097 		.dir = IIO_EV_DIR_FALLING,
1098 		.mask_separate = BIT(IIO_EV_INFO_VALUE),
1099 	},
1100 	{
1101 		.type = IIO_EV_TYPE_THRESH,
1102 		.dir = IIO_EV_DIR_EITHER,
1103 		.mask_separate = BIT(IIO_EV_INFO_ENABLE),
1104 	},
1105 };
1106 
1107 static const struct iio_chan_spec ams_ps_channels[] = {
1108 	AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP),
1109 	AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP_REMOTE, AMS_TEMP_REMOTE),
1110 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1),
1111 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2),
1112 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3),
1113 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4),
1114 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5),
1115 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6),
1116 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7),
1117 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8),
1118 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9),
1119 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10),
1120 	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS),
1121 };
1122 
1123 static const struct iio_chan_spec ams_pl_channels[] = {
1124 	AMS_PL_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP),
1125 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, true),
1126 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, true),
1127 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFP, AMS_VREFP, false),
1128 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFN, AMS_VREFN, false),
1129 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, true),
1130 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, true),
1131 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, true),
1132 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, true),
1133 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, true),
1134 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VP_VN, AMS_VP_VN, false),
1135 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, true),
1136 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, true),
1137 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, true),
1138 	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, true),
1139 	AMS_PL_AUX_CHAN_VOLTAGE(0),
1140 	AMS_PL_AUX_CHAN_VOLTAGE(1),
1141 	AMS_PL_AUX_CHAN_VOLTAGE(2),
1142 	AMS_PL_AUX_CHAN_VOLTAGE(3),
1143 	AMS_PL_AUX_CHAN_VOLTAGE(4),
1144 	AMS_PL_AUX_CHAN_VOLTAGE(5),
1145 	AMS_PL_AUX_CHAN_VOLTAGE(6),
1146 	AMS_PL_AUX_CHAN_VOLTAGE(7),
1147 	AMS_PL_AUX_CHAN_VOLTAGE(8),
1148 	AMS_PL_AUX_CHAN_VOLTAGE(9),
1149 	AMS_PL_AUX_CHAN_VOLTAGE(10),
1150 	AMS_PL_AUX_CHAN_VOLTAGE(11),
1151 	AMS_PL_AUX_CHAN_VOLTAGE(12),
1152 	AMS_PL_AUX_CHAN_VOLTAGE(13),
1153 	AMS_PL_AUX_CHAN_VOLTAGE(14),
1154 	AMS_PL_AUX_CHAN_VOLTAGE(15),
1155 };
1156 
1157 static const struct iio_chan_spec ams_ctrl_channels[] = {
1158 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSPLL, AMS_VCC_PSPLL0),
1159 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSBATT, AMS_VCC_PSPLL3),
1160 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCINT, AMS_VCCINT),
1161 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCBRAM, AMS_VCCBRAM),
1162 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCAUX, AMS_VCCAUX),
1163 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_PSDDRPLL, AMS_PSDDRPLL),
1164 	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_INTDDR, AMS_PSINTFPDDR),
1165 };
1166 
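/*
 * Optional PL channels are described as child nodes: the "reg" property
 * selects an entry from the tail of ams_pl_channels[] (VP/VN and the VAUX
 * inputs) and the optional "xlnx,bipolar" property marks a differential
 * input as signed.
 */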
1167 static int ams_get_ext_chan(struct fwnode_handle *chan_node,
1168 			    struct iio_chan_spec *channels, int num_channels)
1169 {
1170 	struct iio_chan_spec *chan;
1171 	struct fwnode_handle *child;
1172 	unsigned int reg, ext_chan;
1173 	int ret;
1174 
1175 	fwnode_for_each_child_node(chan_node, child) {
1176 		ret = fwnode_property_read_u32(child, "reg", &reg);
1177 		if (ret || reg > AMS_PL_MAX_EXT_CHANNEL + 30)
1178 			continue;
1179 
1180 		chan = &channels[num_channels];
1181 		ext_chan = reg + AMS_PL_MAX_FIXED_CHANNEL - 30;
1182 		memcpy(chan, &ams_pl_channels[ext_chan], sizeof(*channels));
1183 
1184 		if (fwnode_property_read_bool(child, "xlnx,bipolar"))
1185 			chan->scan_type.sign = 's';
1186 
1187 		num_channels++;
1188 	}
1189 
1190 	return num_channels;
1191 }
1192 
1193 static void ams_iounmap_ps(void *data)
1194 {
1195 	struct ams *ams = data;
1196 
1197 	iounmap(ams->ps_base);
1198 }
1199 
1200 static void ams_iounmap_pl(void *data)
1201 {
1202 	struct ams *ams = data;
1203 
1204 	iounmap(ams->pl_base);
1205 }
1206 
1207 static int ams_init_module(struct iio_dev *indio_dev,
1208 			   struct fwnode_handle *fwnode,
1209 			   struct iio_chan_spec *channels)
1210 {
1211 	struct device *dev = indio_dev->dev.parent;
1212 	struct ams *ams = iio_priv(indio_dev);
1213 	int num_channels = 0;
1214 	int ret;
1215 
1216 	if (fwnode_property_match_string(fwnode, "compatible",
1217 					 "xlnx,zynqmp-ams-ps") == 0) {
1218 		ams->ps_base = fwnode_iomap(fwnode, 0);
1219 		if (!ams->ps_base)
1220 			return -ENXIO;
1221 		ret = devm_add_action_or_reset(dev, ams_iounmap_ps, ams);
1222 		if (ret < 0)
1223 			return ret;
1224 
1225 		/* add PS channels to iio device channels */
1226 		memcpy(channels, ams_ps_channels, sizeof(ams_ps_channels));
1227 	} else if (fwnode_property_match_string(fwnode, "compatible",
1228 						"xlnx,zynqmp-ams-pl") == 0) {
1229 		ams->pl_base = fwnode_iomap(fwnode, 0);
1230 		if (!ams->pl_base)
1231 			return -ENXIO;
1232 
1233 		ret = devm_add_action_or_reset(dev, ams_iounmap_pl, ams);
1234 		if (ret < 0)
1235 			return ret;
1236 
1237 		/* Copy only the first 10 fixed channels */
1238 		memcpy(channels, ams_pl_channels, AMS_PL_MAX_FIXED_CHANNEL * sizeof(*channels));
1239 		num_channels += AMS_PL_MAX_FIXED_CHANNEL;
1240 		num_channels = ams_get_ext_chan(fwnode, channels,
1241 						num_channels);
1242 	} else if (fwnode_property_match_string(fwnode, "compatible",
1243 						"xlnx,zynqmp-ams") == 0) {
1244 		/* add AMS channels to iio device channels */
1245 		memcpy(channels, ams_ctrl_channels, sizeof(ams_ctrl_channels));
1246 		num_channels += ARRAY_SIZE(ams_ctrl_channels);
1247 	} else {
1248 		return -EINVAL;
1249 	}
1250 
1251 	return num_channels;
1252 }
1253 
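/*
 * Firmware layout consumed by ams_parse_firmware()/ams_init_module(); the
 * sketch below is illustrative only (node names and unit addresses are
 * placeholders), see the xlnx,zynqmp-ams devicetree binding for the
 * authoritative example:
 *
 *	ams {
 *		compatible = "xlnx,zynqmp-ams";
 *
 *		ams-ps {
 *			compatible = "xlnx,zynqmp-ams-ps";
 *		};
 *
 *		ams-pl {
 *			compatible = "xlnx,zynqmp-ams-pl";
 *
 *			channel@30 {
 *				reg = <30>;
 *				xlnx,bipolar;
 *			};
 *		};
 *	};
 *
 * The parent node provides the control channels, the PS child the PS
 * channels, and the PL child the fixed PL channels plus any external
 * channels listed as children.
 */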
1254 static int ams_parse_firmware(struct iio_dev *indio_dev)
1255 {
1256 	struct ams *ams = iio_priv(indio_dev);
1257 	struct iio_chan_spec *ams_channels, *dev_channels;
1258 	struct device *dev = indio_dev->dev.parent;
1259 	struct fwnode_handle *child = NULL;
1260 	struct fwnode_handle *fwnode = dev_fwnode(dev);
1261 	size_t ams_size, dev_size;
1262 	int ret, ch_cnt = 0, i, rising_off, falling_off;
1263 	unsigned int num_channels = 0;
1264 
1265 	ams_size = ARRAY_SIZE(ams_ps_channels) + ARRAY_SIZE(ams_pl_channels) +
1266 		ARRAY_SIZE(ams_ctrl_channels);
1267 
1268 	/* Initialize buffer for channel specification */
1269 	ams_channels = devm_kcalloc(dev, ams_size, sizeof(*ams_channels), GFP_KERNEL);
1270 	if (!ams_channels)
1271 		return -ENOMEM;
1272 
1273 	if (fwnode_device_is_available(fwnode)) {
1274 		ret = ams_init_module(indio_dev, fwnode, ams_channels);
1275 		if (ret < 0)
1276 			return ret;
1277 
1278 		num_channels += ret;
1279 	}
1280 
1281 	fwnode_for_each_child_node(fwnode, child) {
1282 		if (fwnode_device_is_available(child)) {
1283 			ret = ams_init_module(indio_dev, child, ams_channels + num_channels);
1284 			if (ret < 0) {
1285 				fwnode_handle_put(child);
1286 				return ret;
1287 			}
1288 
1289 			num_channels += ret;
1290 		}
1291 	}
1292 
1293 	for (i = 0; i < num_channels; i++) {
1294 		ams_channels[i].channel = ch_cnt++;
1295 
1296 		if (ams_channels[i].scan_index < AMS_CTRL_SEQ_BASE) {
1297 			/* set threshold to max and min for each channel */
1298 			falling_off =
1299 				ams_get_alarm_offset(ams_channels[i].scan_index,
1300 						     IIO_EV_DIR_FALLING);
1301 			rising_off =
1302 				ams_get_alarm_offset(ams_channels[i].scan_index,
1303 						     IIO_EV_DIR_RISING);
1304 			if (ams_channels[i].scan_index >= AMS_PS_SEQ_MAX) {
1305 				writel(AMS_ALARM_THR_MIN,
1306 				       ams->pl_base + falling_off);
1307 				writel(AMS_ALARM_THR_MAX,
1308 				       ams->pl_base + rising_off);
1309 			} else {
1310 				writel(AMS_ALARM_THR_MIN,
1311 				       ams->ps_base + falling_off);
1312 				writel(AMS_ALARM_THR_MAX,
1313 				       ams->ps_base + rising_off);
1314 			}
1315 		}
1316 	}
1317 
1318 	dev_size = array_size(sizeof(*dev_channels), num_channels);
1319 	if (dev_size == SIZE_MAX)
1320 		return -ENOMEM;
1321 
1322 	dev_channels = devm_krealloc(dev, ams_channels, dev_size, GFP_KERNEL);
1323 	if (!dev_channels)
1324 		return -ENOMEM;
1325 
1326 	indio_dev->channels = dev_channels;
1327 	indio_dev->num_channels = num_channels;
1328 
1329 	return 0;
1330 }
1331 
1332 static const struct iio_info iio_ams_info = {
1333 	.read_raw = &ams_read_raw,
1334 	.read_event_config = &ams_read_event_config,
1335 	.write_event_config = &ams_write_event_config,
1336 	.read_event_value = &ams_read_event_value,
1337 	.write_event_value = &ams_write_event_value,
1338 };
1339 
1340 static const struct of_device_id ams_of_match_table[] = {
1341 	{ .compatible = "xlnx,zynqmp-ams" },
1342 	{ }
1343 };
1344 MODULE_DEVICE_TABLE(of, ams_of_match_table);
1345 
1346 static void ams_clk_disable_unprepare(void *data)
1347 {
1348 	clk_disable_unprepare(data);
1349 }
1350 
1351 static void ams_cancel_delayed_work(void *data)
1352 {
1353 	cancel_delayed_work(data);
1354 }
1355 
1356 static int ams_probe(struct platform_device *pdev)
1357 {
1358 	struct iio_dev *indio_dev;
1359 	struct ams *ams;
1360 	int ret;
1361 	int irq;
1362 
1363 	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ams));
1364 	if (!indio_dev)
1365 		return -ENOMEM;
1366 
1367 	ams = iio_priv(indio_dev);
1368 	mutex_init(&ams->lock);
1369 	spin_lock_init(&ams->intr_lock);
1370 
1371 	indio_dev->name = "xilinx-ams";
1372 
1373 	indio_dev->info = &iio_ams_info;
1374 	indio_dev->modes = INDIO_DIRECT_MODE;
1375 
1376 	ams->base = devm_platform_ioremap_resource(pdev, 0);
1377 	if (IS_ERR(ams->base))
1378 		return PTR_ERR(ams->base);
1379 
1380 	ams->clk = devm_clk_get(&pdev->dev, NULL);
1381 	if (IS_ERR(ams->clk))
1382 		return PTR_ERR(ams->clk);
1383 
1384 	ret = clk_prepare_enable(ams->clk);
1385 	if (ret < 0)
1386 		return ret;
1387 
1388 	ret = devm_add_action_or_reset(&pdev->dev, ams_clk_disable_unprepare, ams->clk);
1389 	if (ret < 0)
1390 		return ret;
1391 
1392 	INIT_DELAYED_WORK(&ams->ams_unmask_work, ams_unmask_worker);
1393 	ret = devm_add_action_or_reset(&pdev->dev, ams_cancel_delayed_work,
1394 				       &ams->ams_unmask_work);
1395 	if (ret < 0)
1396 		return ret;
1397 
1398 	ret = ams_parse_firmware(indio_dev);
1399 	if (ret)
1400 		return dev_err_probe(&pdev->dev, ret, "failure in parsing DT\n");
1401 
1402 	ret = ams_init_device(ams);
1403 	if (ret)
1404 		return dev_err_probe(&pdev->dev, ret, "failed to initialize AMS\n");
1405 
1406 	ams_enable_channel_sequence(indio_dev);
1407 
1408 	irq = platform_get_irq(pdev, 0);
1409 	if (irq < 0)
1410 		return irq;
1411 
1412 	ret = devm_request_irq(&pdev->dev, irq, &ams_irq, 0, "ams-irq",
1413 			       indio_dev);
1414 	if (ret < 0)
1415 		return dev_err_probe(&pdev->dev, ret, "failed to register interrupt\n");
1416 
1417 	platform_set_drvdata(pdev, indio_dev);
1418 
1419 	return devm_iio_device_register(&pdev->dev, indio_dev);
1420 }
1421 
1422 static int __maybe_unused ams_suspend(struct device *dev)
1423 {
1424 	struct ams *ams = iio_priv(dev_get_drvdata(dev));
1425 
1426 	clk_disable_unprepare(ams->clk);
1427 
1428 	return 0;
1429 }
1430 
1431 static int __maybe_unused ams_resume(struct device *dev)
1432 {
1433 	struct ams *ams = iio_priv(dev_get_drvdata(dev));
1434 
1435 	return clk_prepare_enable(ams->clk);
1436 }
1437 
1438 static SIMPLE_DEV_PM_OPS(ams_pm_ops, ams_suspend, ams_resume);
1439 
1440 static struct platform_driver ams_driver = {
1441 	.probe = ams_probe,
1442 	.driver = {
1443 		.name = "xilinx-ams",
1444 		.pm = &ams_pm_ops,
1445 		.of_match_table = ams_of_match_table,
1446 	},
1447 };
1448 module_platform_driver(ams_driver);
1449 
1450 MODULE_LICENSE("GPL v2");
1451 MODULE_AUTHOR("Xilinx, Inc.");
1452