xref: /openbmc/linux/drivers/iio/adc/ti-ads131e08.c (revision 0bf49ffb)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Texas Instruments ADS131E0x 4-, 6- and 8-Channel ADCs
4  *
5  * Copyright (c) 2020 AVL DiTEST GmbH
6  *   Tomislav Denis <tomislav.denis@avl.com>
7  *
8  * Datasheet: https://www.ti.com/lit/ds/symlink/ads131e08.pdf
9  */
10 
11 #include <linux/bitfield.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/module.h>
15 
16 #include <linux/iio/buffer.h>
17 #include <linux/iio/iio.h>
18 #include <linux/iio/sysfs.h>
19 #include <linux/iio/trigger.h>
20 #include <linux/iio/trigger_consumer.h>
21 #include <linux/iio/triggered_buffer.h>
22 
23 #include <linux/regulator/consumer.h>
24 #include <linux/spi/spi.h>
25 
26 #include <asm/unaligned.h>
27 
/* SPI commands */
#define ADS131E08_CMD_RESET		0x06
#define ADS131E08_CMD_START		0x08
#define ADS131E08_CMD_STOP		0x0A
#define ADS131E08_CMD_OFFSETCAL		0x1A
#define ADS131E08_CMD_SDATAC		0x11
#define ADS131E08_CMD_RDATA		0x12
/* RREG/WREG carry the register address in the low 5 bits of the opcode */
#define ADS131E08_CMD_RREG(r)		(BIT(5) | (r & GENMASK(4, 0)))
#define ADS131E08_CMD_WREG(r)		(BIT(6) | (r & GENMASK(4, 0)))

/* Register addresses */
#define ADS131E08_ADR_CFG1R		0x01
#define ADS131E08_ADR_CFG3R		0x03
#define ADS131E08_ADR_CH0R		0x05	/* first channel settings register */

/* Configuration register 1 */
#define ADS131E08_CFG1R_DR_MASK		GENMASK(2, 0)	/* output data rate */

/* Configuration register 3 */
#define ADS131E08_CFG3R_PDB_REFBUF_MASK	BIT(7)	/* internal reference buffer enable */
#define ADS131E08_CFG3R_VREF_4V_MASK	BIT(5)	/* internal reference 4 V (else 2.4 V) */

/* Channel settings register */
#define ADS131E08_CHR_GAIN_MASK		GENMASK(6, 4)	/* PGA gain */
#define ADS131E08_CHR_MUX_MASK		GENMASK(2, 0)	/* input multiplexer */
#define ADS131E08_CHR_PWD_MASK		BIT(7)		/* channel power-down */

/* ADC misc */
#define ADS131E08_DEFAULT_DATA_RATE	1
#define ADS131E08_DEFAULT_PGA_GAIN	1
#define ADS131E08_DEFAULT_MUX		0

/* Internal reference voltage options, in millivolts */
#define ADS131E08_VREF_2V4_mV		2400
#define ADS131E08_VREF_4V_mV		4000

/* Waits expressed in ADC clock cycles or milliseconds */
#define ADS131E08_WAIT_RESET_CYCLES	18
#define ADS131E08_WAIT_SDECODE_CYCLES	4
#define ADS131E08_WAIT_OFFSETCAL_MS	153
#define ADS131E08_MAX_SETTLING_TIME_MS	6

#define ADS131E08_NUM_STATUS_BYTES	3
#define ADS131E08_NUM_DATA_BYTES_MAX	24
/* Samples shrink from 24 to 16 bits at the 32 and 64 kSPS data rates */
#define ADS131E08_NUM_DATA_BYTES(dr)	(((dr) >= 32) ? 2 : 3)
#define ADS131E08_NUM_DATA_BITS(dr)	(ADS131E08_NUM_DATA_BYTES(dr) * 8)
#define ADS131E08_NUM_STORAGE_BYTES	4
/* Supported chip variants; used as indices into ads131e08_info_tbl. */
enum ads131e08_ids {
	ads131e04,
	ads131e06,
	ads131e08,
};

/* Static per-variant data, selected through the OF match table. */
struct ads131e08_info {
	unsigned int max_channels;	/* number of ADC channels on the chip */
	const char *name;		/* IIO device name */
};

/* Per-channel configuration parsed from the firmware child nodes. */
struct ads131e08_channel_config {
	unsigned int pga_gain;	/* PGA gain factor (1, 2, 4, 8 or 12) */
	unsigned int mux;	/* input multiplexer setting */
};

/* Driver state, allocated as iio_priv() of the IIO device. */
struct ads131e08_state {
	const struct ads131e08_info *info;
	struct spi_device *spi;
	struct iio_trigger *trig;	/* data-ready hardware trigger */
	struct clk *adc_clk;
	struct regulator *vref_reg;	/* external vref; NULL -> internal reference */
	struct ads131e08_channel_config *channel_config;
	unsigned int data_rate;		/* current data rate in kSPS */
	unsigned int vref_mv;		/* internal reference voltage in mV */
	unsigned int sdecode_delay_us;	/* SPI command decode delay */
	unsigned int reset_delay_us;	/* settling time after RESET command */
	unsigned int readback_len;	/* bytes per RDATA readback */
	struct completion completion;	/* completed by the data-ready IRQ */
	/* Scan buffer pushed to the IIO buffer; timestamp must be 8-byte aligned. */
	struct {
		u8 data[ADS131E08_NUM_DATA_BYTES_MAX];
		s64 ts __aligned(8);
	} tmp_buf;

	/* SPI transfer buffers, kept in their own cacheline for DMA safety. */
	u8 tx_buf[3] ____cacheline_aligned;
	/*
	 * Add extra one padding byte to be able to access the last channel
	 * value using u32 pointer
	 */
	u8 rx_buf[ADS131E08_NUM_STATUS_BYTES +
		ADS131E08_NUM_DATA_BYTES_MAX + 1];
};
116 
/* Per-variant data, indexed by enum ads131e08_ids. */
static const struct ads131e08_info ads131e08_info_tbl[] = {
	[ads131e04] = {
		.max_channels = 4,
		.name = "ads131e04",
	},
	[ads131e06] = {
		.max_channels = 6,
		.name = "ads131e06",
	},
	[ads131e08] = {
		.max_channels = 8,
		.name = "ads131e08",
	},
};

struct ads131e08_data_rate_desc {
	unsigned int rate;  /* data rate in kSPS */
	u8 reg;             /* DR field value for configuration register 1 */
};

/* Mapping of supported data rates to the CFG1R DR field encoding. */
static const struct ads131e08_data_rate_desc ads131e08_data_rate_tbl[] = {
	{ .rate = 64,   .reg = 0x00 },
	{ .rate = 32,   .reg = 0x01 },
	{ .rate = 16,   .reg = 0x02 },
	{ .rate = 8,    .reg = 0x03 },
	{ .rate = 4,    .reg = 0x04 },
	{ .rate = 2,    .reg = 0x05 },
	{ .rate = 1,    .reg = 0x06 },
};

struct ads131e08_pga_gain_desc {
	unsigned int gain;  /* PGA gain value */
	u8 reg;             /* GAIN field value for the channel register */
};

/*
 * Mapping of supported PGA gains to the channel register GAIN field.
 * Note the encoding is not contiguous: gain 8 maps to 0x05, gain 12 to 0x06.
 */
static const struct ads131e08_pga_gain_desc ads131e08_pga_gain_tbl[] = {
	{ .gain = 1,   .reg = 0x01 },
	{ .gain = 2,   .reg = 0x02 },
	{ .gain = 4,   .reg = 0x04 },
	{ .gain = 8,   .reg = 0x05 },
	{ .gain = 12,  .reg = 0x06 },
};

/* Input mux settings accepted from the "ti,mux" firmware property. */
static const u8 ads131e08_valid_channel_mux_values[] = { 0, 1, 3, 4 };
161 
162 static int ads131e08_exec_cmd(struct ads131e08_state *st, u8 cmd)
163 {
164 	int ret;
165 
166 	ret = spi_write_then_read(st->spi, &cmd, 1, NULL, 0);
167 	if (ret)
168 		dev_err(&st->spi->dev, "Exec cmd(%02x) failed\n", cmd);
169 
170 	return ret;
171 }
172 
173 static int ads131e08_read_reg(struct ads131e08_state *st, u8 reg)
174 {
175 	int ret;
176 	struct spi_transfer transfer[] = {
177 		{
178 			.tx_buf = &st->tx_buf,
179 			.len = 2,
180 			.delay_usecs = st->sdecode_delay_us,
181 		}, {
182 			.rx_buf = &st->rx_buf,
183 			.len = 1,
184 		},
185 	};
186 
187 	st->tx_buf[0] = ADS131E08_CMD_RREG(reg);
188 	st->tx_buf[1] = 0;
189 
190 	ret = spi_sync_transfer(st->spi, transfer, ARRAY_SIZE(transfer));
191 	if (ret) {
192 		dev_err(&st->spi->dev, "Read register failed\n");
193 		return ret;
194 	}
195 
196 	return st->rx_buf[0];
197 }
198 
199 static int ads131e08_write_reg(struct ads131e08_state *st, u8 reg, u8 value)
200 {
201 	int ret;
202 	struct spi_transfer transfer[] = {
203 		{
204 			.tx_buf = &st->tx_buf,
205 			.len = 3,
206 			.delay_usecs = st->sdecode_delay_us,
207 		}
208 	};
209 
210 	st->tx_buf[0] = ADS131E08_CMD_WREG(reg);
211 	st->tx_buf[1] = 0;
212 	st->tx_buf[2] = value;
213 
214 	ret = spi_sync_transfer(st->spi, transfer, ARRAY_SIZE(transfer));
215 	if (ret)
216 		dev_err(&st->spi->dev, "Write register failed\n");
217 
218 	return ret;
219 }
220 
221 static int ads131e08_read_data(struct ads131e08_state *st, int rx_len)
222 {
223 	int ret;
224 	struct spi_transfer transfer[] = {
225 		{
226 			.tx_buf = &st->tx_buf,
227 			.len = 1,
228 		}, {
229 			.rx_buf = &st->rx_buf,
230 			.len = rx_len,
231 		},
232 	};
233 
234 	st->tx_buf[0] = ADS131E08_CMD_RDATA;
235 
236 	ret = spi_sync_transfer(st->spi, transfer, ARRAY_SIZE(transfer));
237 	if (ret)
238 		dev_err(&st->spi->dev, "Read data failed\n");
239 
240 	return ret;
241 }
242 
243 static int ads131e08_set_data_rate(struct ads131e08_state *st, int data_rate)
244 {
245 	int i, reg, ret;
246 
247 	for (i = 0; i < ARRAY_SIZE(ads131e08_data_rate_tbl); i++) {
248 		if (ads131e08_data_rate_tbl[i].rate == data_rate)
249 			break;
250 	}
251 
252 	if (i == ARRAY_SIZE(ads131e08_data_rate_tbl)) {
253 		dev_err(&st->spi->dev, "invalid data rate value\n");
254 		return -EINVAL;
255 	}
256 
257 	reg = ads131e08_read_reg(st, ADS131E08_ADR_CFG1R);
258 	if (reg < 0)
259 		return reg;
260 
261 	reg &= ~ADS131E08_CFG1R_DR_MASK;
262 	reg |= FIELD_PREP(ADS131E08_CFG1R_DR_MASK,
263 		ads131e08_data_rate_tbl[i].reg);
264 
265 	ret = ads131e08_write_reg(st, ADS131E08_ADR_CFG1R, reg);
266 	if (ret)
267 		return ret;
268 
269 	st->data_rate = data_rate;
270 	st->readback_len = ADS131E08_NUM_STATUS_BYTES +
271 		ADS131E08_NUM_DATA_BYTES(st->data_rate) *
272 		st->info->max_channels;
273 
274 	return 0;
275 }
276 
277 static int ads131e08_pga_gain_to_field_value(struct ads131e08_state *st,
278 	unsigned int pga_gain)
279 {
280 	int i;
281 
282 	for (i = 0; i < ARRAY_SIZE(ads131e08_pga_gain_tbl); i++) {
283 		if (ads131e08_pga_gain_tbl[i].gain == pga_gain)
284 			break;
285 	}
286 
287 	if (i == ARRAY_SIZE(ads131e08_pga_gain_tbl)) {
288 		dev_err(&st->spi->dev, "invalid PGA gain value\n");
289 		return -EINVAL;
290 	}
291 
292 	return ads131e08_pga_gain_tbl[i].reg;
293 }
294 
295 static int ads131e08_set_pga_gain(struct ads131e08_state *st,
296 	unsigned int channel, unsigned int pga_gain)
297 {
298 	int field_value, reg;
299 
300 	field_value = ads131e08_pga_gain_to_field_value(st, pga_gain);
301 	if (field_value < 0)
302 		return field_value;
303 
304 	reg = ads131e08_read_reg(st, ADS131E08_ADR_CH0R + channel);
305 	if (reg < 0)
306 		return reg;
307 
308 	reg &= ~ADS131E08_CHR_GAIN_MASK;
309 	reg |= FIELD_PREP(ADS131E08_CHR_GAIN_MASK, field_value);
310 
311 	return ads131e08_write_reg(st, ADS131E08_ADR_CH0R + channel, reg);
312 }
313 
314 static int ads131e08_validate_channel_mux(struct ads131e08_state *st,
315 	unsigned int mux)
316 {
317 	int i;
318 
319 	for (i = 0; i < ARRAY_SIZE(ads131e08_valid_channel_mux_values); i++) {
320 		if (ads131e08_valid_channel_mux_values[i] == mux)
321 			break;
322 	}
323 
324 	if (i == ARRAY_SIZE(ads131e08_valid_channel_mux_values)) {
325 		dev_err(&st->spi->dev, "invalid channel mux value\n");
326 		return -EINVAL;
327 	}
328 
329 	return 0;
330 }
331 
332 static int ads131e08_set_channel_mux(struct ads131e08_state *st,
333 	unsigned int channel, unsigned int mux)
334 {
335 	int reg;
336 
337 	reg = ads131e08_read_reg(st, ADS131E08_ADR_CH0R + channel);
338 	if (reg < 0)
339 		return reg;
340 
341 	reg &= ~ADS131E08_CHR_MUX_MASK;
342 	reg |= FIELD_PREP(ADS131E08_CHR_MUX_MASK, mux);
343 
344 	return ads131e08_write_reg(st, ADS131E08_ADR_CH0R + channel, reg);
345 }
346 
347 static int ads131e08_power_down_channel(struct ads131e08_state *st,
348 	unsigned int channel, bool value)
349 {
350 	int reg;
351 
352 	reg = ads131e08_read_reg(st, ADS131E08_ADR_CH0R + channel);
353 	if (reg < 0)
354 		return reg;
355 
356 	reg &= ~ADS131E08_CHR_PWD_MASK;
357 	reg |= FIELD_PREP(ADS131E08_CHR_PWD_MASK, value);
358 
359 	return ads131e08_write_reg(st, ADS131E08_ADR_CH0R + channel, reg);
360 }
361 
/*
 * Configure the voltage reference source in configuration register 3.
 *
 * When no external vref regulator was provided, the internal reference
 * buffer is powered up and the internal reference voltage (2.4 V or 4 V,
 * chosen at probe time from st->vref_mv) is selected. Otherwise the
 * internal buffer stays powered down and the external reference is used.
 */
static int ads131e08_config_reference_voltage(struct ads131e08_state *st)
{
	int reg;

	reg = ads131e08_read_reg(st, ADS131E08_ADR_CFG3R);
	if (reg < 0)
		return reg;

	/* Default: internal reference buffer powered down (external vref). */
	reg &= ~ADS131E08_CFG3R_PDB_REFBUF_MASK;
	if (!st->vref_reg) {
		/* Internal reference: enable the buffer, select 2.4 V or 4 V. */
		reg |= FIELD_PREP(ADS131E08_CFG3R_PDB_REFBUF_MASK, 1);
		reg &= ~ADS131E08_CFG3R_VREF_4V_MASK;
		reg |= FIELD_PREP(ADS131E08_CFG3R_VREF_4V_MASK,
			st->vref_mv == ADS131E08_VREF_4V_mV);
	}

	return ads131e08_write_reg(st, ADS131E08_ADR_CFG3R, reg);
}
380 
/*
 * Bring the ADC into a known state after probe: reset it, stop continuous
 * data output, program the default data rate, configure the reference and
 * every declared channel, power down unused channels and run the initial
 * offset calibration.
 */
static int ads131e08_initial_config(struct iio_dev *indio_dev)
{
	const struct iio_chan_spec *channel = indio_dev->channels;
	struct ads131e08_state *st = iio_priv(indio_dev);
	unsigned long active_channels = 0;
	int ret, i;

	ret = ads131e08_exec_cmd(st, ADS131E08_CMD_RESET);
	if (ret)
		return ret;

	/* Wait for the device to settle after reset (18 ADC clock cycles). */
	udelay(st->reset_delay_us);

	/* Disable read data in continuous mode (enabled by default) */
	ret = ads131e08_exec_cmd(st, ADS131E08_CMD_SDATAC);
	if (ret)
		return ret;

	ret = ads131e08_set_data_rate(st, ADS131E08_DEFAULT_DATA_RATE);
	if (ret)
		return ret;

	ret = ads131e08_config_reference_voltage(st);
	if (ret)
		return ret;

	/* Apply gain and mux from the parsed firmware configuration. */
	for (i = 0;  i < indio_dev->num_channels; i++) {
		ret = ads131e08_set_pga_gain(st, channel->channel,
			st->channel_config[i].pga_gain);
		if (ret)
			return ret;

		ret = ads131e08_set_channel_mux(st, channel->channel,
			st->channel_config[i].mux);
		if (ret)
			return ret;

		active_channels |= BIT(channel->channel);
		channel++;
	}

	/* Power down unused channels */
	for_each_clear_bit(i, &active_channels, st->info->max_channels) {
		ret = ads131e08_power_down_channel(st, i, true);
		if (ret)
			return ret;
	}

	/* Request channel offset calibration */
	ret = ads131e08_exec_cmd(st, ADS131E08_CMD_OFFSETCAL);
	if (ret)
		return ret;

	/*
	 * Channel offset calibration is triggered with the first START
	 * command. Since calibration takes more time than settling operation,
	 * this causes timeout error when command START is sent first
	 * time (e.g. first call of the ads131e08_read_direct method).
	 * To avoid this problem offset calibration is triggered here.
	 */
	ret = ads131e08_exec_cmd(st, ADS131E08_CMD_START);
	if (ret)
		return ret;

	msleep(ADS131E08_WAIT_OFFSETCAL_MS);

	return ads131e08_exec_cmd(st, ADS131E08_CMD_STOP);
}
449 
450 static int ads131e08_pool_data(struct ads131e08_state *st)
451 {
452 	unsigned long timeout;
453 	int ret;
454 
455 	reinit_completion(&st->completion);
456 
457 	ret = ads131e08_exec_cmd(st, ADS131E08_CMD_START);
458 	if (ret)
459 		return ret;
460 
461 	timeout = msecs_to_jiffies(ADS131E08_MAX_SETTLING_TIME_MS);
462 	ret = wait_for_completion_timeout(&st->completion, timeout);
463 	if (!ret)
464 		return -ETIMEDOUT;
465 
466 	ret = ads131e08_read_data(st, st->readback_len);
467 	if (ret)
468 		return ret;
469 
470 	return ads131e08_exec_cmd(st, ADS131E08_CMD_STOP);
471 }
472 
/*
 * Perform a single polled conversion and extract the given channel's value
 * from the readback buffer.
 *
 * Channel data is big-endian and 16 or 24 bits wide depending on the data
 * rate. get_unaligned_be32() may read one byte beyond the last channel's
 * data; rx_buf carries one extra padding byte for exactly this reason.
 */
static int ads131e08_read_direct(struct iio_dev *indio_dev,
	struct iio_chan_spec const *channel, int *value)
{
	struct ads131e08_state *st = iio_priv(indio_dev);
	u8 num_bits, *src;
	int ret;

	ret = ads131e08_pool_data(st);
	if (ret)
		return ret;

	/* Skip the status bytes and all preceding channels' samples. */
	src = st->rx_buf + ADS131E08_NUM_STATUS_BYTES +
		channel->channel * ADS131E08_NUM_DATA_BYTES(st->data_rate);

	num_bits = ADS131E08_NUM_DATA_BITS(st->data_rate);
	/* Drop the trailing bytes, then sign-extend from the sample width. */
	*value = sign_extend32(get_unaligned_be32(src) >> (32 - num_bits), num_bits - 1);

	return 0;
}
492 
/*
 * Read a channel attribute.
 *
 * IIO_CHAN_INFO_RAW performs a polled single conversion with direct mode
 * claimed, so the triggered buffer cannot run concurrently. The scale is
 * vref(mV) / gain over 2^(sample bits - 1), reported as
 * IIO_VAL_FRACTIONAL_LOG2. The sampling frequency is in kSPS.
 */
static int ads131e08_read_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *channel, int *value,
	int *value2, long mask)
{
	struct ads131e08_state *st = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		ret = iio_device_claim_direct_mode(indio_dev);
		if (ret)
			return ret;

		ret = ads131e08_read_direct(indio_dev, channel, value);
		iio_device_release_direct_mode(indio_dev);
		if (ret)
			return ret;

		return IIO_VAL_INT;

	case IIO_CHAN_INFO_SCALE:
		/* Prefer the external reference voltage when one is wired up. */
		if (st->vref_reg) {
			ret = regulator_get_voltage(st->vref_reg);
			if (ret < 0)
				return ret;

			*value = ret / 1000;	/* uV -> mV */
		} else {
			*value = st->vref_mv;
		}

		*value /= st->channel_config[channel->address].pga_gain;
		*value2 = ADS131E08_NUM_DATA_BITS(st->data_rate) - 1;

		return IIO_VAL_FRACTIONAL_LOG2;

	case IIO_CHAN_INFO_SAMP_FREQ:
		*value = st->data_rate;

		return IIO_VAL_INT;

	default:
		return -EINVAL;
	}
}
538 
539 static int ads131e08_write_raw(struct iio_dev *indio_dev,
540 	struct iio_chan_spec const *channel, int value,
541 	int value2, long mask)
542 {
543 	struct ads131e08_state *st = iio_priv(indio_dev);
544 	int ret;
545 
546 	switch (mask) {
547 	case IIO_CHAN_INFO_SAMP_FREQ:
548 		ret = iio_device_claim_direct_mode(indio_dev);
549 		if (ret)
550 			return ret;
551 
552 		ret = ads131e08_set_data_rate(st, value);
553 		iio_device_release_direct_mode(indio_dev);
554 		return ret;
555 
556 	default:
557 		return -EINVAL;
558 	}
559 }
560 
/* Available sampling frequencies in kSPS; matches ads131e08_data_rate_tbl. */
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1 2 4 8 16 32 64");

static struct attribute *ads131e08_attributes[] = {
	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
	NULL
};

static const struct attribute_group ads131e08_attribute_group = {
	.attrs = ads131e08_attributes,
};
571 
/*
 * Debugfs direct register access hook.
 *
 * Returns 0 on success for both directions. Note the error check on the
 * read path: ads131e08_read_reg() returns the register value or a negative
 * error code, so a failure must not be stored into *readval nor may the
 * (positive) register value be returned as the function result.
 */
static int ads131e08_debugfs_reg_access(struct iio_dev *indio_dev,
	unsigned int reg, unsigned int writeval, unsigned int *readval)
{
	struct ads131e08_state *st = iio_priv(indio_dev);

	if (readval) {
		int ret = ads131e08_read_reg(st, reg);

		if (ret < 0)
			return ret;

		*readval = ret;
		return 0;
	}

	return ads131e08_write_reg(st, reg, writeval);
}
585 
/* IIO core callbacks and sysfs attributes for the device. */
static const struct iio_info ads131e08_iio_info = {
	.read_raw = ads131e08_read_raw,
	.write_raw = ads131e08_write_raw,
	.attrs = &ads131e08_attribute_group,
	.debugfs_reg_access = &ads131e08_debugfs_reg_access,
};
592 
593 static int ads131e08_set_trigger_state(struct iio_trigger *trig, bool state)
594 {
595 	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
596 	struct ads131e08_state *st = iio_priv(indio_dev);
597 	u8 cmd = state ? ADS131E08_CMD_START : ADS131E08_CMD_STOP;
598 
599 	return ads131e08_exec_cmd(st, cmd);
600 }
601 
/* Only the device's own data-ready trigger may drive this device. */
static const struct iio_trigger_ops ads131e08_trigger_ops = {
	.set_trigger_state = &ads131e08_set_trigger_state,
	.validate_device = &iio_trigger_validate_own_device,
};
606 
/*
 * Triggered-buffer handler: read one scan's worth of samples, repack them
 * into the fixed be:s24/32>>8 scan element layout and push the result,
 * with a timestamp, to the IIO buffer.
 */
static irqreturn_t ads131e08_trigger_handler(int irq, void *private)
{
	struct iio_poll_func *pf = private;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct ads131e08_state *st = iio_priv(indio_dev);
	unsigned int chn, i = 0;
	u8 *src, *dest;
	int ret;

	/*
	 * The number of data bits per channel depends on the data rate.
	 * For 32 and 64 ksps data rates, number of data bits per channel
	 * is 16. This case is not compliant with used (fixed) scan element
	 * type (be:s24/32>>8). So we use a little tweak to pack properly
	 * 16 bits of data into the buffer.
	 */
	unsigned int num_bytes = ADS131E08_NUM_DATA_BYTES(st->data_rate);
	u8 tweek_offset = num_bytes == 2 ? 1 : 0;

	/* With our own data-ready trigger a conversion is already done. */
	if (iio_trigger_using_own(indio_dev))
		ret = ads131e08_read_data(st, st->readback_len);
	else
		ret = ads131e08_pool_data(st);

	if (ret)
		goto out;

	for_each_set_bit(chn, indio_dev->active_scan_mask, indio_dev->masklength) {
		src = st->rx_buf + ADS131E08_NUM_STATUS_BYTES + chn * num_bytes;
		dest = st->tmp_buf.data + i * ADS131E08_NUM_STORAGE_BYTES;

		/*
		 * Tweak offset is 0:
		 * +---+---+---+---+
		 * |D0 |D1 |D2 | X | (3 data bytes)
		 * +---+---+---+---+
		 *  a+0 a+1 a+2 a+3
		 *
		 * Tweak offset is 1:
		 * +---+---+---+---+
		 * |P0 |D0 |D1 | X | (one padding byte and 2 data bytes)
		 * +---+---+---+---+
		 *  a+0 a+1 a+2 a+3
		 */
		memcpy(dest + tweek_offset, src, num_bytes);

		/*
		 * Data conversion from 16 bits of data to 24 bits of data
		 * is done by sign extension (properly filling padding byte).
		 */
		if (tweek_offset)
			*dest = *src & BIT(7) ? 0xff : 0x00;

		i++;
	}

	iio_push_to_buffers_with_timestamp(indio_dev, st->tmp_buf.data,
		iio_get_time_ns(indio_dev));

out:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
671 
672 static irqreturn_t ads131e08_interrupt(int irq, void *private)
673 {
674 	struct iio_dev *indio_dev = private;
675 	struct ads131e08_state *st = iio_priv(indio_dev);
676 
677 	if (iio_buffer_enabled(indio_dev) && iio_trigger_using_own(indio_dev))
678 		iio_trigger_poll(st->trig);
679 	else
680 		complete(&st->completion);
681 
682 	return IRQ_HANDLED;
683 }
684 
685 static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
686 {
687 	struct ads131e08_state *st = iio_priv(indio_dev);
688 	struct ads131e08_channel_config *channel_config;
689 	struct device *dev = &st->spi->dev;
690 	struct iio_chan_spec *channels;
691 	struct fwnode_handle *node;
692 	unsigned int channel, tmp;
693 	int num_channels, i, ret;
694 
695 	ret = device_property_read_u32(dev, "ti,vref-internal", &tmp);
696 	if (ret)
697 		tmp = 0;
698 
699 	switch (tmp) {
700 	case 0:
701 		st->vref_mv = ADS131E08_VREF_2V4_mV;
702 		break;
703 	case 1:
704 		st->vref_mv = ADS131E08_VREF_4V_mV;
705 		break;
706 	default:
707 		dev_err(&st->spi->dev, "invalid internal voltage reference\n");
708 		return -EINVAL;
709 	}
710 
711 	num_channels = device_get_child_node_count(dev);
712 	if (num_channels == 0) {
713 		dev_err(&st->spi->dev, "no channel children\n");
714 		return -ENODEV;
715 	}
716 
717 	if (num_channels > st->info->max_channels) {
718 		dev_err(&st->spi->dev, "num of channel children out of range\n");
719 		return -EINVAL;
720 	}
721 
722 	channels = devm_kcalloc(&st->spi->dev, num_channels,
723 		sizeof(*channels), GFP_KERNEL);
724 	if (!channels)
725 		return -ENOMEM;
726 
727 	channel_config = devm_kcalloc(&st->spi->dev, num_channels,
728 		sizeof(*channel_config), GFP_KERNEL);
729 	if (!channel_config)
730 		return -ENOMEM;
731 
732 	i = 0;
733 	device_for_each_child_node(dev, node) {
734 		ret = fwnode_property_read_u32(node, "reg", &channel);
735 		if (ret)
736 			return ret;
737 
738 		ret = fwnode_property_read_u32(node, "ti,gain", &tmp);
739 		if (ret) {
740 			channel_config[i].pga_gain = ADS131E08_DEFAULT_PGA_GAIN;
741 		} else {
742 			ret = ads131e08_pga_gain_to_field_value(st, tmp);
743 			if (ret < 0)
744 				return ret;
745 
746 			channel_config[i].pga_gain = tmp;
747 		}
748 
749 		ret = fwnode_property_read_u32(node, "ti,mux", &tmp);
750 		if (ret) {
751 			channel_config[i].mux = ADS131E08_DEFAULT_MUX;
752 		} else {
753 			ret = ads131e08_validate_channel_mux(st, tmp);
754 			if (ret)
755 				return ret;
756 
757 			channel_config[i].mux = tmp;
758 		}
759 
760 		channels[i].type = IIO_VOLTAGE;
761 		channels[i].indexed = 1;
762 		channels[i].channel = channel;
763 		channels[i].address = i;
764 		channels[i].info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
765 						BIT(IIO_CHAN_INFO_SCALE);
766 		channels[i].info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ);
767 		channels[i].scan_index = channel;
768 		channels[i].scan_type.sign = 's';
769 		channels[i].scan_type.realbits = 24;
770 		channels[i].scan_type.storagebits = 32;
771 		channels[i].scan_type.shift = 8;
772 		channels[i].scan_type.endianness = IIO_BE;
773 		i++;
774 	}
775 
776 	indio_dev->channels = channels;
777 	indio_dev->num_channels = num_channels;
778 	st->channel_config = channel_config;
779 
780 	return 0;
781 }
782 
/* devm action: disable the external vref regulator on driver detach. */
static void ads131e08_regulator_disable(void *data)
{
	struct ads131e08_state *st = data;

	regulator_disable(st->vref_reg);
}
789 
/* devm action: disable and unprepare the ADC clock on driver detach. */
static void ads131e08_clk_disable(void *data)
{
	struct ads131e08_state *st = data;

	clk_disable_unprepare(st->adc_clk);
}
796 
797 static int ads131e08_probe(struct spi_device *spi)
798 {
799 	const struct ads131e08_info *info;
800 	struct ads131e08_state *st;
801 	struct iio_dev *indio_dev;
802 	unsigned long adc_clk_hz;
803 	unsigned long adc_clk_ns;
804 	int ret;
805 
806 	info = device_get_match_data(&spi->dev);
807 	if (!info) {
808 		dev_err(&spi->dev, "failed to get match data\n");
809 		return -ENODEV;
810 	}
811 
812 	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
813 	if (!indio_dev) {
814 		dev_err(&spi->dev, "failed to allocate IIO device\n");
815 		return -ENOMEM;
816 	}
817 
818 	st = iio_priv(indio_dev);
819 	st->info = info;
820 	st->spi = spi;
821 
822 	ret = ads131e08_alloc_channels(indio_dev);
823 	if (ret)
824 		return ret;
825 
826 	indio_dev->name = st->info->name;
827 	indio_dev->dev.parent = &spi->dev;
828 	indio_dev->info = &ads131e08_iio_info;
829 	indio_dev->modes = INDIO_DIRECT_MODE;
830 
831 	init_completion(&st->completion);
832 
833 	if (spi->irq) {
834 		ret = devm_request_irq(&spi->dev, spi->irq,
835 			ads131e08_interrupt,
836 			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
837 			spi->dev.driver->name, indio_dev);
838 		if (ret)
839 			return dev_err_probe(&spi->dev, ret,
840 					     "request irq failed\n");
841 	} else {
842 		dev_err(&spi->dev, "data ready IRQ missing\n");
843 		return -ENODEV;
844 	}
845 
846 	st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
847 		indio_dev->name, indio_dev->id);
848 	if (!st->trig) {
849 		dev_err(&spi->dev, "failed to allocate IIO trigger\n");
850 		return -ENOMEM;
851 	}
852 
853 	st->trig->ops = &ads131e08_trigger_ops;
854 	st->trig->dev.parent = &spi->dev;
855 	iio_trigger_set_drvdata(st->trig, indio_dev);
856 	ret = devm_iio_trigger_register(&spi->dev, st->trig);
857 	if (ret) {
858 		dev_err(&spi->dev, "failed to register IIO trigger\n");
859 		return -ENOMEM;
860 	}
861 
862 	indio_dev->trig = iio_trigger_get(st->trig);
863 
864 	ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
865 		NULL, &ads131e08_trigger_handler, NULL);
866 	if (ret) {
867 		dev_err(&spi->dev, "failed to setup IIO buffer\n");
868 		return ret;
869 	}
870 
871 	st->vref_reg = devm_regulator_get_optional(&spi->dev, "vref");
872 	if (!IS_ERR(st->vref_reg)) {
873 		ret = regulator_enable(st->vref_reg);
874 		if (ret) {
875 			dev_err(&spi->dev,
876 				"failed to enable external vref supply\n");
877 			return ret;
878 		}
879 
880 		ret = devm_add_action_or_reset(&spi->dev, ads131e08_regulator_disable, st);
881 		if (ret)
882 			return ret;
883 	} else {
884 		if (PTR_ERR(st->vref_reg) != -ENODEV)
885 			return PTR_ERR(st->vref_reg);
886 
887 		st->vref_reg = NULL;
888 	}
889 
890 	st->adc_clk = devm_clk_get(&spi->dev, "adc-clk");
891 	if (IS_ERR(st->adc_clk))
892 		return dev_err_probe(&spi->dev, PTR_ERR(st->adc_clk),
893 				     "failed to get the ADC clock\n");
894 
895 	ret = clk_prepare_enable(st->adc_clk);
896 	if (ret) {
897 		dev_err(&spi->dev, "failed to prepare/enable the ADC clock\n");
898 		return ret;
899 	}
900 
901 	ret = devm_add_action_or_reset(&spi->dev, ads131e08_clk_disable, st);
902 	if (ret)
903 		return ret;
904 
905 	adc_clk_hz = clk_get_rate(st->adc_clk);
906 	if (!adc_clk_hz) {
907 		dev_err(&spi->dev, "failed to get the ADC clock rate\n");
908 		return  -EINVAL;
909 	}
910 
911 	adc_clk_ns = NSEC_PER_SEC / adc_clk_hz;
912 	st->sdecode_delay_us = DIV_ROUND_UP(
913 		ADS131E08_WAIT_SDECODE_CYCLES * adc_clk_ns, NSEC_PER_USEC);
914 	st->reset_delay_us = DIV_ROUND_UP(
915 		ADS131E08_WAIT_RESET_CYCLES * adc_clk_ns, NSEC_PER_USEC);
916 
917 	ret = ads131e08_initial_config(indio_dev);
918 	if (ret) {
919 		dev_err(&spi->dev, "initial configuration failed\n");
920 		return ret;
921 	}
922 
923 	return devm_iio_device_register(&spi->dev, indio_dev);
924 }
925 
/* OF match table; .data points at the per-variant ads131e08_info entry. */
static const struct of_device_id ads131e08_of_match[] = {
	{ .compatible = "ti,ads131e04",
	  .data = &ads131e08_info_tbl[ads131e04], },
	{ .compatible = "ti,ads131e06",
	  .data = &ads131e08_info_tbl[ads131e06], },
	{ .compatible = "ti,ads131e08",
	  .data = &ads131e08_info_tbl[ads131e08], },
	{}
};
MODULE_DEVICE_TABLE(of, ads131e08_of_match);
936 
/* SPI driver; all teardown is devm-managed, so no remove callback. */
static struct spi_driver ads131e08_driver = {
	.driver = {
		.name = "ads131e08",
		.of_match_table = ads131e08_of_match,
	},
	.probe = ads131e08_probe,
};
module_spi_driver(ads131e08_driver);

MODULE_AUTHOR("Tomislav Denis <tomislav.denis@avl.com>");
MODULE_DESCRIPTION("Driver for ADS131E0x ADC family");
MODULE_LICENSE("GPL v2");
949