// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI ADC MFD driver
 *
 * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iio/iio.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iopoll.h>

#include <linux/mfd/ti_am335x_tscadc.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

#define DMA_BUFFER_SIZE SZ_2K

struct tiadc_dma {
	struct dma_slave_config conf;
	struct dma_chan *chan;
	dma_addr_t addr;
	dma_cookie_t cookie;
	u8 *buf;
	int current_period;
	int period_size;
	u8 fifo_thresh;
};

struct tiadc_device {
	struct ti_tscadc_dev *mfd_tscadc;
	struct tiadc_dma dma;
	struct mutex fifo1_lock; /* to protect fifo access */
	int channels;
	int total_ch_enabled;
	u8 channel_line[8];
	u8 channel_step[8];
	int buffer_en_ch_steps;
	u16 data[8];
	u32 open_delay[8], sample_delay[8], step_avg[8];
};

static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg)
{
	return readl(adc->mfd_tscadc->tscadc_base + reg);
}

static void tiadc_writel(struct tiadc_device *adc, unsigned int reg,
			 unsigned int val)
{
	writel(val, adc->mfd_tscadc->tscadc_base + reg);
}

static u32 get_adc_step_mask(struct tiadc_device *adc_dev)
{
	u32 step_en;

	step_en = ((1 << adc_dev->channels) - 1);
	step_en <<= TOTAL_STEPS - adc_dev->channels + 1;
	return step_en;
}

static u32 get_adc_chan_step_mask(struct tiadc_device *adc_dev,
				  struct iio_chan_spec const *chan)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adc_dev->channel_step); i++) {
		if (chan->channel == adc_dev->channel_line[i]) {
			u32 step;

			step = adc_dev->channel_step[i];
			/* +1 for the charger */
			return 1 << (step + 1);
		}
	}
	WARN_ON(1);
	return 0;
}

static u32 get_adc_step_bit(struct tiadc_device *adc_dev, int chan)
{
	return 1 << adc_dev->channel_step[chan];
}

static int tiadc_wait_idle(struct tiadc_device *adc_dev)
{
	u32 val;

	return readl_poll_timeout(adc_dev->mfd_tscadc->tscadc_base + REG_ADCFSM,
				  val, !(val & SEQ_STATUS), 10,
				  IDLE_TIMEOUT_MS * 1000 * adc_dev->channels);
}

static void tiadc_step_config(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int stepconfig;
	int i, steps = 0;

	/*
	 * There are 16 configurable steps and 8 analog input lines
	 * available, shared between the touchscreen and the ADC.
	 *
	 * Steps are assigned to the ADC from 0 upwards, one per
	 * requested input line. The channel number selects which
	 * analog input is routed to the ADC for conversion.
	 */
	for (i = 0; i < adc_dev->channels; i++) {
		int chan;

		chan = adc_dev->channel_line[i];

		if (adc_dev->step_avg[i])
			stepconfig = STEPCONFIG_AVG(ffs(adc_dev->step_avg[i]) - 1) |
				     STEPCONFIG_FIFO1;
		else
			stepconfig = STEPCONFIG_FIFO1;

		if (iio_buffer_enabled(indio_dev))
			stepconfig |= STEPCONFIG_MODE_SWCNT;

		tiadc_writel(adc_dev, REG_STEPCONFIG(steps),
			     stepconfig | STEPCONFIG_INP(chan) |
			     STEPCONFIG_INM_ADCREFM | STEPCONFIG_RFP_VREFP |
			     STEPCONFIG_RFM_VREFN);

		tiadc_writel(adc_dev, REG_STEPDELAY(steps),
			     STEPDELAY_OPEN(adc_dev->open_delay[i]) |
			     STEPDELAY_SAMPLE(adc_dev->sample_delay[i]));

		adc_dev->channel_step[i] = steps;
		steps++;
	}
}
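
/*
 * Hard IRQ handler: recovers a FIFO1 overrun inline by clearing the
 * flags and toggling CNTRLREG_SSENB, and defers FIFO1 threshold
 * handling to the threaded handler tiadc_worker_h() after masking
 * the interrupt.
 */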
static irqreturn_t tiadc_irq_h(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int status, config, adc_fsm;
	unsigned short count = 0;

	status = tiadc_readl(adc_dev, REG_IRQSTATUS);

	/*
	 * ADC and touchscreen share the IRQ line.
	 * FIFO0 interrupts are used by TSC. Handle FIFO1 IRQs here only
	 */
	if (status & IRQENB_FIFO1OVRRUN) {
		/* FIFO Overrun. Clear flag. Disable/Enable ADC to recover */
		config = tiadc_readl(adc_dev, REG_CTRL);
		config &= ~(CNTRLREG_SSENB);
		tiadc_writel(adc_dev, REG_CTRL, config);
		tiadc_writel(adc_dev, REG_IRQSTATUS,
			     IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW |
			     IRQENB_FIFO1THRES);

		/*
		 * Wait for the idle state.
		 * ADC needs to finish the current conversion
		 * before disabling the module
		 */
		do {
			adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
		} while (adc_fsm != 0x10 && count++ < 100);

		tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_SSENB));
		return IRQ_HANDLED;
	} else if (status & IRQENB_FIFO1THRES) {
		/* Disable irq and wake worker thread */
		tiadc_writel(adc_dev, REG_IRQCLR, IRQENB_FIFO1THRES);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static irqreturn_t tiadc_worker_h(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, k, fifo1count, read;
	u16 *data = adc_dev->data;

	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (k = 0; k < fifo1count; k = k + i) {
		for (i = 0; i < indio_dev->scan_bytes / 2; i++) {
			read = tiadc_readl(adc_dev, REG_FIFO1);
			data[i] = read & FIFOREAD_DATA_MASK;
		}
		iio_push_to_buffers(indio_dev, (u8 *)data);
	}

	tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES);
	tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES);

	return IRQ_HANDLED;
}

static void tiadc_dma_rx_complete(void *param)
{
	struct iio_dev *indio_dev = param;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	u8 *data;
	int i;

	data = dma->buf + dma->current_period * dma->period_size;
	dma->current_period = 1 - dma->current_period; /* swap the buffer ID */

	for (i = 0; i < dma->period_size; i += indio_dev->scan_bytes) {
		iio_push_to_buffers(indio_dev, data);
		data += indio_dev->scan_bytes;
	}
}
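
/*
 * Worked example of the sizing below (values are illustrative only):
 * if FIFO1_THRESHOLD + 1 is 20 words and three channels are enabled,
 * fifo_thresh becomes rounddown(20, 3) - 1 = 17, i.e. 18 words per
 * burst, and each cyclic DMA period is rounddown(1024, 18 * 2) = 1008
 * bytes, so every period holds a whole number of complete scans.
 */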
static int tiadc_start_dma(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	struct dma_async_tx_descriptor *desc;

	dma->current_period = 0; /* We start to fill period 0 */

	/*
	 * Make the FIFO threshold a multiple of the total number of
	 * enabled channels so that the cyclic DMA period length, which
	 * is itself a multiple of the threshold, is also a multiple of
	 * the number of enabled channels. This ensures that no invalid
	 * data is reported to the stack via iio_push_to_buffers().
	 */
	dma->fifo_thresh = rounddown(FIFO1_THRESHOLD + 1,
				     adc_dev->total_ch_enabled) - 1;

	/* Make sure that period length is a multiple of the fifo thresh level */
	dma->period_size = rounddown(DMA_BUFFER_SIZE / 2,
				     (dma->fifo_thresh + 1) * sizeof(u16));

	dma->conf.src_maxburst = dma->fifo_thresh + 1;
	dmaengine_slave_config(dma->chan, &dma->conf);

	desc = dmaengine_prep_dma_cyclic(dma->chan, dma->addr,
					 dma->period_size * 2,
					 dma->period_size, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->callback = tiadc_dma_rx_complete;
	desc->callback_param = indio_dev;

	dma->cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->chan);

	tiadc_writel(adc_dev, REG_FIFO1THR, dma->fifo_thresh);
	tiadc_writel(adc_dev, REG_DMA1REQ, dma->fifo_thresh);
	tiadc_writel(adc_dev, REG_DMAENABLE_SET, DMA_FIFO1);

	return 0;
}

static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, fifo1count;
	int ret;

	ret = tiadc_wait_idle(adc_dev);
	if (ret)
		return ret;

	tiadc_writel(adc_dev, REG_IRQCLR,
		     IRQENB_FIFO1THRES | IRQENB_FIFO1OVRRUN |
		     IRQENB_FIFO1UNDRFLW);

	/* Flush FIFO. Needed in corner cases in simultaneous tsc/adc use */
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (i = 0; i < fifo1count; i++)
		tiadc_readl(adc_dev, REG_FIFO1);

	return 0;
}

static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	unsigned int irq_enable;
	unsigned int enb = 0;
	u8 bit;

	tiadc_step_config(indio_dev);
	for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) {
		enb |= (get_adc_step_bit(adc_dev, bit) << 1);
		adc_dev->total_ch_enabled++;
	}
	adc_dev->buffer_en_ch_steps = enb;

	if (dma->chan)
		tiadc_start_dma(indio_dev);

	am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);

	tiadc_writel(adc_dev, REG_IRQSTATUS,
		     IRQENB_FIFO1THRES | IRQENB_FIFO1OVRRUN |
		     IRQENB_FIFO1UNDRFLW);

	irq_enable = IRQENB_FIFO1OVRRUN;
	if (!dma->chan)
		irq_enable |= IRQENB_FIFO1THRES;
	tiadc_writel(adc_dev, REG_IRQENABLE, irq_enable);

	return 0;
}

static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	int fifo1count, i;

	tiadc_writel(adc_dev, REG_IRQCLR,
		     IRQENB_FIFO1THRES | IRQENB_FIFO1OVRRUN |
		     IRQENB_FIFO1UNDRFLW);
	am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
	adc_dev->buffer_en_ch_steps = 0;
	adc_dev->total_ch_enabled = 0;
	if (dma->chan) {
		tiadc_writel(adc_dev, REG_DMAENABLE_CLEAR, 0x2);
		dmaengine_terminate_async(dma->chan);
	}

	/* Flush FIFO of leftover data in the time it takes to disable adc */
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (i = 0; i < fifo1count; i++)
		tiadc_readl(adc_dev, REG_FIFO1);

	return 0;
}
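
/*
 * With the buffer torn down, run tiadc_step_config() again: it only ORs
 * in STEPCONFIG_MODE_SWCNT (continuous conversion) while the buffer is
 * enabled, so this restores the one-shot step mode used by direct reads.
 */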
static int tiadc_buffer_postdisable(struct iio_dev *indio_dev)
{
	tiadc_step_config(indio_dev);

	return 0;
}

static const struct iio_buffer_setup_ops tiadc_buffer_setup_ops = {
	.preenable = &tiadc_buffer_preenable,
	.postenable = &tiadc_buffer_postenable,
	.predisable = &tiadc_buffer_predisable,
	.postdisable = &tiadc_buffer_postdisable,
};

static int tiadc_iio_buffered_hardware_setup(struct device *dev,
					     struct iio_dev *indio_dev,
					     irqreturn_t (*pollfunc_bh)(int irq, void *p),
					     irqreturn_t (*pollfunc_th)(int irq, void *p),
					     int irq, unsigned long flags,
					     const struct iio_buffer_setup_ops *setup_ops)
{
	int ret;

	ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
					  INDIO_BUFFER_SOFTWARE,
					  setup_ops);
	if (ret)
		return ret;

	return devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
					 flags, indio_dev->name, indio_dev);
}

static const char * const chan_name_ain[] = {
	"AIN0",
	"AIN1",
	"AIN2",
	"AIN3",
	"AIN4",
	"AIN5",
	"AIN6",
	"AIN7",
};

static int tiadc_channel_init(struct device *dev, struct iio_dev *indio_dev,
			      int channels)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct iio_chan_spec *chan_array;
	struct iio_chan_spec *chan;
	int i;

	indio_dev->num_channels = channels;
	chan_array = devm_kcalloc(dev, channels, sizeof(*chan_array),
				  GFP_KERNEL);
	if (!chan_array)
		return -ENOMEM;

	chan = chan_array;
	for (i = 0; i < channels; i++, chan++) {
		chan->type = IIO_VOLTAGE;
		chan->indexed = 1;
		chan->channel = adc_dev->channel_line[i];
		chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
		chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
		chan->datasheet_name = chan_name_ain[chan->channel];
		chan->scan_index = i;
		chan->scan_type.sign = 'u';
		chan->scan_type.realbits = 12;
		chan->scan_type.storagebits = 16;
	}

	indio_dev->channels = chan_array;

	return 0;
}
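
/*
 * One-shot read path: under fifo1_lock, wait for the sequencer to go
 * idle, drain any stale FIFO1 entries, arm the channel's step via
 * am335x_tsc_se_set_once(), then poll FIFO1CNT until data arrives or
 * the timeout expires. The whole FIFO is scanned and only the latest
 * sample matching the requested step is returned. SCALE reports the
 * 1800 mV reference divided by the 2^12 full scale.
 */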
static int tiadc_read_raw(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan, int *val, int *val2,
			  long mask)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, map_val;
	unsigned int fifo1count, read, stepid;
	bool found = false;
	u32 step_en;
	unsigned long timeout;
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		break;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			*val = 1800;
			*val2 = chan->scan_type.realbits;
			return IIO_VAL_FRACTIONAL_LOG2;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	if (iio_buffer_enabled(indio_dev))
		return -EBUSY;

	step_en = get_adc_chan_step_mask(adc_dev, chan);
	if (!step_en)
		return -EINVAL;

	mutex_lock(&adc_dev->fifo1_lock);

	ret = tiadc_wait_idle(adc_dev);
	if (ret)
		goto err_unlock;

	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	while (fifo1count--)
		tiadc_readl(adc_dev, REG_FIFO1);

	am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);

	/* Wait for FIFO threshold interrupt */
	timeout = jiffies + msecs_to_jiffies(IDLE_TIMEOUT_MS * adc_dev->channels);
	while (1) {
		fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
		if (fifo1count)
			break;

		if (time_after(jiffies, timeout)) {
			am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
			ret = -EAGAIN;
			goto err_unlock;
		}
	}

	map_val = adc_dev->channel_step[chan->scan_index];

	/*
	 * Check the complete FIFO. We programmed just one entry, but if an
	 * earlier read timed out (the -EAGAIN path above) and its value
	 * appeared in the FIFO only later, there could be more than one
	 * entry here. Therefore read every item and keep only the latest
	 * sample for the requested channel.
	 */
	for (i = 0; i < fifo1count; i++) {
		read = tiadc_readl(adc_dev, REG_FIFO1);
		stepid = read & FIFOREAD_CHNLID_MASK;
		stepid = stepid >> 0x10;

		if (stepid == map_val) {
			read = read & FIFOREAD_DATA_MASK;
			found = true;
			*val = (u16)read;
		}
	}

	am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);

	if (!found)
		ret = -EBUSY;

err_unlock:
	mutex_unlock(&adc_dev->fifo1_lock);
	return ret ? ret : IIO_VAL_INT;
}

static const struct iio_info tiadc_info = {
	.read_raw = &tiadc_read_raw,
};

static int tiadc_request_dma(struct platform_device *pdev,
			     struct tiadc_device *adc_dev)
{
	struct tiadc_dma *dma = &adc_dev->dma;
	dma_cap_mask_t mask;

	/* Default slave configuration parameters */
	dma->conf.direction = DMA_DEV_TO_MEM;
	dma->conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	dma->conf.src_addr = adc_dev->mfd_tscadc->tscadc_phys_base + REG_FIFO1;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	/* Get a channel for RX */
	dma->chan = dma_request_chan(adc_dev->mfd_tscadc->dev, "fifo1");
	if (IS_ERR(dma->chan)) {
		int ret = PTR_ERR(dma->chan);

		dma->chan = NULL;
		return ret;
	}

	/* RX buffer */
	dma->buf = dma_alloc_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
				      &dma->addr, GFP_KERNEL);
	if (!dma->buf)
		goto err;

	return 0;

err:
	dma_release_channel(dma->chan);
	return -ENOMEM;
}
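
/*
 * Devicetree parsing. An illustrative node fragment (property values
 * here are examples only, not defaults):
 *
 *	adc {
 *		ti,adc-channels = <4 5 6 7>;
 *		ti,chan-step-avg = <16 2 4 8>;
 *		ti,chan-step-opendelay = <0x98 0x3ffff 0x98 0>;
 *		ti,chan-step-sampledelay = <0xff 0 0xf 0>;
 *	};
 *
 * If the optional per-step properties are absent, the defaults set in
 * the loop below (STEPCONFIG_OPENDLY, STEPCONFIG_SAMPLEDLY, averaging
 * factor 16) are kept; out-of-range values are truncated with a warning.
 */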
static int tiadc_parse_dt(struct platform_device *pdev,
			  struct tiadc_device *adc_dev)
{
	struct device_node *node = pdev->dev.of_node;
	struct property *prop;
	const __be32 *cur;
	int channels = 0;
	u32 val;
	int i;

	of_property_for_each_u32(node, "ti,adc-channels", prop, cur, val) {
		adc_dev->channel_line[channels] = val;

		/* Set Default values for optional DT parameters */
		adc_dev->open_delay[channels] = STEPCONFIG_OPENDLY;
		adc_dev->sample_delay[channels] = STEPCONFIG_SAMPLEDLY;
		adc_dev->step_avg[channels] = 16;

		channels++;
	}

	adc_dev->channels = channels;

	of_property_read_u32_array(node, "ti,chan-step-avg",
				   adc_dev->step_avg, channels);
	of_property_read_u32_array(node, "ti,chan-step-opendelay",
				   adc_dev->open_delay, channels);
	of_property_read_u32_array(node, "ti,chan-step-sampledelay",
				   adc_dev->sample_delay, channels);

	for (i = 0; i < adc_dev->channels; i++) {
		int chan;

		chan = adc_dev->channel_line[i];

		if (adc_dev->step_avg[i] > STEPCONFIG_AVG_16) {
			dev_warn(&pdev->dev,
				 "chan %d: wrong step avg, truncated to %ld\n",
				 chan, STEPCONFIG_AVG_16);
			adc_dev->step_avg[i] = STEPCONFIG_AVG_16;
		}

		if (adc_dev->open_delay[i] > STEPCONFIG_MAX_OPENDLY) {
			dev_warn(&pdev->dev,
				 "chan %d: wrong open delay, truncated to 0x%lX\n",
				 chan, STEPCONFIG_MAX_OPENDLY);
			adc_dev->open_delay[i] = STEPCONFIG_MAX_OPENDLY;
		}

		if (adc_dev->sample_delay[i] > STEPCONFIG_MAX_SAMPLE) {
			dev_warn(&pdev->dev,
				 "chan %d: wrong sample delay, truncated to 0x%lX\n",
				 chan, STEPCONFIG_MAX_SAMPLE);
			adc_dev->sample_delay[i] = STEPCONFIG_MAX_SAMPLE;
		}
	}

	return 0;
}

static int tiadc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct tiadc_device *adc_dev;
	struct device_node *node = pdev->dev.of_node;
	int err;

	if (!node) {
		dev_err(&pdev->dev, "Could not find valid DT data.\n");
		return -EINVAL;
	}

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
	if (!indio_dev) {
		dev_err(&pdev->dev, "failed to allocate iio device\n");
		return -ENOMEM;
	}
	adc_dev = iio_priv(indio_dev);

	adc_dev->mfd_tscadc = ti_tscadc_dev_get(pdev);
	tiadc_parse_dt(pdev, adc_dev);

	indio_dev->name = dev_name(&pdev->dev);
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &tiadc_info;

	tiadc_step_config(indio_dev);
	tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
	mutex_init(&adc_dev->fifo1_lock);

	err = tiadc_channel_init(&pdev->dev, indio_dev, adc_dev->channels);
	if (err < 0)
		return err;

	err = tiadc_iio_buffered_hardware_setup(&pdev->dev, indio_dev,
						&tiadc_worker_h,
						&tiadc_irq_h,
						adc_dev->mfd_tscadc->irq,
						IRQF_SHARED,
						&tiadc_buffer_setup_ops);
	if (err)
		return err;

	err = iio_device_register(indio_dev);
	if (err)
		return err;

	platform_set_drvdata(pdev, indio_dev);

	err = tiadc_request_dma(pdev, adc_dev);
	if (err && err == -EPROBE_DEFER)
		goto err_dma;

	return 0;

err_dma:
	iio_device_unregister(indio_dev);

	return err;
}

static int tiadc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	u32 step_en;

	if (dma->chan) {
		dma_free_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
				  dma->buf, dma->addr);
		dma_release_channel(dma->chan);
	}
	iio_device_unregister(indio_dev);

	step_en = get_adc_step_mask(adc_dev);
	am335x_tsc_se_clr(adc_dev->mfd_tscadc, step_en);

	return 0;
}
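
/*
 * System PM: suspend clears CNTRLREG_SSENB and sets CNTRLREG_POWERDOWN
 * to power the ADC down; resume clears the power-down bit, reprograms
 * the step registers and restores the cached buffered-step mask via
 * am335x_tsc_se_set_cache().
 */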
static int __maybe_unused tiadc_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int idle;

	idle = tiadc_readl(adc_dev, REG_CTRL);
	idle &= ~(CNTRLREG_SSENB);
	tiadc_writel(adc_dev, REG_CTRL, idle | CNTRLREG_POWERDOWN);

	return 0;
}

static int __maybe_unused tiadc_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int restore;

	/* Make sure ADC is powered up */
	restore = tiadc_readl(adc_dev, REG_CTRL);
	restore &= ~CNTRLREG_POWERDOWN;
	tiadc_writel(adc_dev, REG_CTRL, restore);

	tiadc_step_config(indio_dev);
	am335x_tsc_se_set_cache(adc_dev->mfd_tscadc,
				adc_dev->buffer_en_ch_steps);
	return 0;
}

static SIMPLE_DEV_PM_OPS(tiadc_pm_ops, tiadc_suspend, tiadc_resume);

static const struct of_device_id ti_adc_dt_ids[] = {
	{ .compatible = "ti,am3359-adc", },
	{ .compatible = "ti,am4372-adc", },
	{ }
};
MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);

static struct platform_driver tiadc_driver = {
	.driver = {
		.name = "TI-am335x-adc",
		.pm = &tiadc_pm_ops,
		.of_match_table = ti_adc_dt_ids,
	},
	.probe = tiadc_probe,
	.remove = tiadc_remove,
};
module_platform_driver(tiadc_driver);

MODULE_DESCRIPTION("TI ADC controller driver");
MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
MODULE_LICENSE("GPL");