// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI ADC MFD driver
 *
 * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iio/iio.h>
#include <linux/of.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iopoll.h>

#include <linux/mfd/ti_am335x_tscadc.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

#define DMA_BUFFER_SIZE SZ_2K

struct tiadc_dma {
	struct dma_slave_config conf;
	struct dma_chan *chan;
	dma_addr_t addr;
	dma_cookie_t cookie;
	u8 *buf;
	int current_period;
	int period_size;
	u8 fifo_thresh;
};

struct tiadc_device {
	struct ti_tscadc_dev *mfd_tscadc;
	struct tiadc_dma dma;
	struct mutex fifo1_lock; /* to protect fifo access */
	int channels;
	int total_ch_enabled;
	u8 channel_line[8];
	u8 channel_step[8];
	int buffer_en_ch_steps;
	u16 data[8];
	u32 open_delay[8], sample_delay[8], step_avg[8];
};

static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg)
{
	return readl(adc->mfd_tscadc->tscadc_base + reg);
}

static void tiadc_writel(struct tiadc_device *adc, unsigned int reg,
			 unsigned int val)
{
	writel(val, adc->mfd_tscadc->tscadc_base + reg);
}

static u32 get_adc_step_mask(struct tiadc_device *adc_dev)
{
	u32 step_en;

	step_en = ((1 << adc_dev->channels) - 1);
	step_en <<= TOTAL_STEPS - adc_dev->channels + 1;
	return step_en;
}

static u32 get_adc_chan_step_mask(struct tiadc_device *adc_dev,
				  struct iio_chan_spec const *chan)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adc_dev->channel_step); i++) {
		if (chan->channel == adc_dev->channel_line[i]) {
			u32 step;

			step = adc_dev->channel_step[i];
			/* +1 for the charger */
			return 1 << (step + 1);
		}
	}
	WARN_ON(1);
	return 0;
}

static u32 get_adc_step_bit(struct tiadc_device *adc_dev, int chan)
{
	return 1 << adc_dev->channel_step[chan];
}
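/*
 * Poll the ADC state machine until the sequencer reports idle. The timeout
 * scales with the number of configured channels since every armed step has
 * to finish its conversion first.
 */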
static int tiadc_wait_idle(struct tiadc_device *adc_dev)
{
	u32 val;

	return readl_poll_timeout(adc_dev->mfd_tscadc->tscadc_base + REG_ADCFSM,
				  val, !(val & SEQ_STATUS), 10,
				  IDLE_TIMEOUT_MS * 1000 * adc_dev->channels);
}

static void tiadc_step_config(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int stepconfig;
	int i, steps = 0;

	/*
	 * There are 16 configurable steps and 8 analog input
	 * lines available which are shared between Touchscreen and ADC.
	 *
	 * Steps forwards i.e. from 0 towards 16 are used by ADC
	 * depending on number of input lines needed.
	 * Channel would represent which analog input
	 * needs to be given to ADC to digitalize data.
	 */
	for (i = 0; i < adc_dev->channels; i++) {
		int chan;

		chan = adc_dev->channel_line[i];

		if (adc_dev->step_avg[i])
			stepconfig = STEPCONFIG_AVG(ffs(adc_dev->step_avg[i]) - 1) |
				     STEPCONFIG_FIFO1;
		else
			stepconfig = STEPCONFIG_FIFO1;

		if (iio_buffer_enabled(indio_dev))
			stepconfig |= STEPCONFIG_MODE_SWCNT;

		tiadc_writel(adc_dev, REG_STEPCONFIG(steps),
			     stepconfig | STEPCONFIG_INP(chan) |
			     STEPCONFIG_INM_ADCREFM | STEPCONFIG_RFP_VREFP |
			     STEPCONFIG_RFM_VREFN);

		tiadc_writel(adc_dev, REG_STEPDELAY(steps),
			     STEPDELAY_OPEN(adc_dev->open_delay[i]) |
			     STEPDELAY_SAMPLE(adc_dev->sample_delay[i]));

		adc_dev->channel_step[i] = steps;
		steps++;
	}
}

static irqreturn_t tiadc_irq_h(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int status, config, adc_fsm;
	unsigned short count = 0;

	status = tiadc_readl(adc_dev, REG_IRQSTATUS);

	/*
	 * ADC and touchscreen share the IRQ line.
	 * FIFO0 interrupts are used by TSC. Handle FIFO1 IRQs here only
	 */
	if (status & IRQENB_FIFO1OVRRUN) {
		/* FIFO Overrun. Clear flag. Disable/Enable ADC to recover */
		config = tiadc_readl(adc_dev, REG_CTRL);
		config &= ~(CNTRLREG_SSENB);
		tiadc_writel(adc_dev, REG_CTRL, config);
		tiadc_writel(adc_dev, REG_IRQSTATUS,
			     IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW |
			     IRQENB_FIFO1THRES);

		/*
		 * Wait for the idle state.
		 * ADC needs to finish the current conversion
		 * before disabling the module
		 */
		do {
			adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
		} while (adc_fsm != 0x10 && count++ < 100);

		tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_SSENB));
		return IRQ_HANDLED;
	} else if (status & IRQENB_FIFO1THRES) {
		/* Disable irq and wake worker thread */
		tiadc_writel(adc_dev, REG_IRQCLR, IRQENB_FIFO1THRES);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static irqreturn_t tiadc_worker_h(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, k, fifo1count, read;
	u16 *data = adc_dev->data;

	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (k = 0; k < fifo1count; k = k + i) {
		for (i = 0; i < indio_dev->scan_bytes / 2; i++) {
			read = tiadc_readl(adc_dev, REG_FIFO1);
			data[i] = read & FIFOREAD_DATA_MASK;
		}
		iio_push_to_buffers(indio_dev, (u8 *)data);
	}

	tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES);
	tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES);

	return IRQ_HANDLED;
}
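/*
 * Cyclic DMA completion callback: push the period that just finished
 * filling to the IIO buffers, one scan at a time, then flip to the other
 * half of the ping-pong buffer.
 */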
static void tiadc_dma_rx_complete(void *param)
{
	struct iio_dev *indio_dev = param;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	u8 *data;
	int i;

	data = dma->buf + dma->current_period * dma->period_size;
	dma->current_period = 1 - dma->current_period; /* swap the buffer ID */

	for (i = 0; i < dma->period_size; i += indio_dev->scan_bytes) {
		iio_push_to_buffers(indio_dev, data);
		data += indio_dev->scan_bytes;
	}
}

static int tiadc_start_dma(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	struct dma_async_tx_descriptor *desc;

	dma->current_period = 0; /* We start to fill period 0 */

	/*
	 * Make the FIFO threshold a multiple of the total number of enabled
	 * channels, and make the cyclic DMA period length a multiple of the
	 * FIFO threshold as well. This ensures that no invalid data is
	 * reported to the stack via iio_push_to_buffers().
	 */
	dma->fifo_thresh = rounddown(FIFO1_THRESHOLD + 1,
				     adc_dev->total_ch_enabled) - 1;

	/* Make sure that period length is multiple of fifo thresh level */
	dma->period_size = rounddown(DMA_BUFFER_SIZE / 2,
				     (dma->fifo_thresh + 1) * sizeof(u16));

	dma->conf.src_maxburst = dma->fifo_thresh + 1;
	dmaengine_slave_config(dma->chan, &dma->conf);

	desc = dmaengine_prep_dma_cyclic(dma->chan, dma->addr,
					 dma->period_size * 2,
					 dma->period_size, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->callback = tiadc_dma_rx_complete;
	desc->callback_param = indio_dev;

	dma->cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->chan);

	tiadc_writel(adc_dev, REG_FIFO1THR, dma->fifo_thresh);
	tiadc_writel(adc_dev, REG_DMA1REQ, dma->fifo_thresh);
	tiadc_writel(adc_dev, REG_DMAENABLE_SET, DMA_FIFO1);

	return 0;
}

static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, fifo1count;
	int ret;

	ret = tiadc_wait_idle(adc_dev);
	if (ret)
		return ret;

	tiadc_writel(adc_dev, REG_IRQCLR,
		     IRQENB_FIFO1THRES | IRQENB_FIFO1OVRRUN |
		     IRQENB_FIFO1UNDRFLW);

	/* Flush FIFO. Needed in corner cases in simultaneous tsc/adc use */
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (i = 0; i < fifo1count; i++)
		tiadc_readl(adc_dev, REG_FIFO1);

	return 0;
}
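/*
 * Bit 0 of the step enable register belongs to the TSC charge step, so ADC
 * step n maps to enable bit n + 1; the resulting mask is cached so it can
 * be restored after a suspend/resume cycle.
 */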
static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	unsigned int irq_enable;
	unsigned int enb = 0;
	u8 bit;

	tiadc_step_config(indio_dev);
	for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) {
		enb |= (get_adc_step_bit(adc_dev, bit) << 1);
		adc_dev->total_ch_enabled++;
	}
	adc_dev->buffer_en_ch_steps = enb;

	if (dma->chan)
		tiadc_start_dma(indio_dev);

	am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);

	tiadc_writel(adc_dev, REG_IRQSTATUS,
		     IRQENB_FIFO1THRES | IRQENB_FIFO1OVRRUN |
		     IRQENB_FIFO1UNDRFLW);

	irq_enable = IRQENB_FIFO1OVRRUN;
	if (!dma->chan)
		irq_enable |= IRQENB_FIFO1THRES;
	tiadc_writel(adc_dev, REG_IRQENABLE, irq_enable);

	return 0;
}

static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	int fifo1count, i;

	tiadc_writel(adc_dev, REG_IRQCLR,
		     IRQENB_FIFO1THRES | IRQENB_FIFO1OVRRUN |
		     IRQENB_FIFO1UNDRFLW);
	am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
	adc_dev->buffer_en_ch_steps = 0;
	adc_dev->total_ch_enabled = 0;
	if (dma->chan) {
		tiadc_writel(adc_dev, REG_DMAENABLE_CLEAR, 0x2);
		dmaengine_terminate_async(dma->chan);
	}

	/* Flush FIFO of leftover data in the time it takes to disable adc */
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (i = 0; i < fifo1count; i++)
		tiadc_readl(adc_dev, REG_FIFO1);

	return 0;
}

static int tiadc_buffer_postdisable(struct iio_dev *indio_dev)
{
	tiadc_step_config(indio_dev);

	return 0;
}

static const struct iio_buffer_setup_ops tiadc_buffer_setup_ops = {
	.preenable = &tiadc_buffer_preenable,
	.postenable = &tiadc_buffer_postenable,
	.predisable = &tiadc_buffer_predisable,
	.postdisable = &tiadc_buffer_postdisable,
};

static int tiadc_iio_buffered_hardware_setup(struct device *dev,
					     struct iio_dev *indio_dev,
					     irqreturn_t (*pollfunc_bh)(int irq, void *p),
					     irqreturn_t (*pollfunc_th)(int irq, void *p),
					     int irq, unsigned long flags,
					     const struct iio_buffer_setup_ops *setup_ops)
{
	int ret;

	ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops);
	if (ret)
		return ret;

	return devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
					 flags, indio_dev->name, indio_dev);
}

static const char * const chan_name_ain[] = {
	"AIN0",
	"AIN1",
	"AIN2",
	"AIN3",
	"AIN4",
	"AIN5",
	"AIN6",
	"AIN7",
};

static int tiadc_channel_init(struct device *dev, struct iio_dev *indio_dev,
			      int channels)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct iio_chan_spec *chan_array;
	struct iio_chan_spec *chan;
	int i;

	indio_dev->num_channels = channels;
	chan_array = devm_kcalloc(dev, channels, sizeof(*chan_array),
				  GFP_KERNEL);
	if (!chan_array)
		return -ENOMEM;

	chan = chan_array;
	for (i = 0; i < channels; i++, chan++) {
		chan->type = IIO_VOLTAGE;
		chan->indexed = 1;
		chan->channel = adc_dev->channel_line[i];
		chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
		chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
		chan->datasheet_name = chan_name_ain[chan->channel];
		chan->scan_index = i;
		chan->scan_type.sign = 'u';
		chan->scan_type.realbits = 12;
		chan->scan_type.storagebits = 16;
	}

	indio_dev->channels = chan_array;

	return 0;
}
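/*
 * Single-shot read path: arm the channel's step once, wait for a sample to
 * land in FIFO1, then keep the latest FIFO entry whose step ID matches the
 * requested channel.
 */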
static int tiadc_read_raw(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan, int *val, int *val2,
			  long mask)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, map_val;
	unsigned int fifo1count, read, stepid;
	bool found = false;
	u32 step_en;
	unsigned long timeout;
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		break;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			*val = 1800;
			*val2 = chan->scan_type.realbits;
			return IIO_VAL_FRACTIONAL_LOG2;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	if (iio_buffer_enabled(indio_dev))
		return -EBUSY;

	step_en = get_adc_chan_step_mask(adc_dev, chan);
	if (!step_en)
		return -EINVAL;

	mutex_lock(&adc_dev->fifo1_lock);

	ret = tiadc_wait_idle(adc_dev);
	if (ret)
		goto err_unlock;

	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	while (fifo1count--)
		tiadc_readl(adc_dev, REG_FIFO1);

	am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);

	/* Wait for Fifo threshold interrupt */
	timeout = jiffies + msecs_to_jiffies(IDLE_TIMEOUT_MS * adc_dev->channels);
	while (1) {
		fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
		if (fifo1count)
			break;

		if (time_after(jiffies, timeout)) {
			am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
			ret = -EAGAIN;
			goto err_unlock;
		}
	}

	map_val = adc_dev->channel_step[chan->scan_index];

	/*
	 * We check the complete FIFO. We programmed just one entry, but if a
	 * previous read timed out (-EAGAIN) and its value appeared in the FIFO
	 * later, we would have two entries. Therefore we read every item and
	 * keep only the latest value for the requested channel.
	 */
	for (i = 0; i < fifo1count; i++) {
		read = tiadc_readl(adc_dev, REG_FIFO1);
		stepid = read & FIFOREAD_CHNLID_MASK;
		stepid = stepid >> 0x10;

		if (stepid == map_val) {
			read = read & FIFOREAD_DATA_MASK;
			found = true;
			*val = (u16)read;
		}
	}

	am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);

	if (!found)
		ret = -EBUSY;

err_unlock:
	mutex_unlock(&adc_dev->fifo1_lock);
	return ret ? ret : IIO_VAL_INT;
}

static const struct iio_info tiadc_info = {
	.read_raw = &tiadc_read_raw,
};

static int tiadc_request_dma(struct platform_device *pdev,
			     struct tiadc_device *adc_dev)
{
	struct tiadc_dma *dma = &adc_dev->dma;
	dma_cap_mask_t mask;

	/* Default slave configuration parameters */
	dma->conf.direction = DMA_DEV_TO_MEM;
	dma->conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	dma->conf.src_addr = adc_dev->mfd_tscadc->tscadc_phys_base + REG_FIFO1;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	/* Get a channel for RX */
	dma->chan = dma_request_chan(adc_dev->mfd_tscadc->dev, "fifo1");
	if (IS_ERR(dma->chan)) {
		int ret = PTR_ERR(dma->chan);

		dma->chan = NULL;
		return ret;
	}

	/* RX buffer */
	dma->buf = dma_alloc_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
				      &dma->addr, GFP_KERNEL);
	if (!dma->buf)
		goto err;

	return 0;

err:
	dma_release_channel(dma->chan);
	return -ENOMEM;
}
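/*
 * Per-channel DT properties are optional: defaults are set while walking
 * "ti,adc-channels" and then overridden by the "ti,chan-step-*" arrays when
 * present, with out-of-range values truncated to the hardware limits.
 */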
static int tiadc_parse_dt(struct platform_device *pdev,
			  struct tiadc_device *adc_dev)
{
	struct device_node *node = pdev->dev.of_node;
	struct property *prop;
	const __be32 *cur;
	int channels = 0;
	u32 val;
	int i;

	of_property_for_each_u32(node, "ti,adc-channels", prop, cur, val) {
		adc_dev->channel_line[channels] = val;

		/* Set Default values for optional DT parameters */
		adc_dev->open_delay[channels] = STEPCONFIG_OPENDLY;
		adc_dev->sample_delay[channels] = STEPCONFIG_SAMPLEDLY;
		adc_dev->step_avg[channels] = 16;

		channels++;
	}

	adc_dev->channels = channels;

	of_property_read_u32_array(node, "ti,chan-step-avg",
				   adc_dev->step_avg, channels);
	of_property_read_u32_array(node, "ti,chan-step-opendelay",
				   adc_dev->open_delay, channels);
	of_property_read_u32_array(node, "ti,chan-step-sampledelay",
				   adc_dev->sample_delay, channels);

	for (i = 0; i < adc_dev->channels; i++) {
		int chan;

		chan = adc_dev->channel_line[i];

		if (adc_dev->step_avg[i] > STEPCONFIG_AVG_16) {
			dev_warn(&pdev->dev,
				 "chan %d: wrong step avg, truncated to %ld\n",
				 chan, STEPCONFIG_AVG_16);
			adc_dev->step_avg[i] = STEPCONFIG_AVG_16;
		}

		if (adc_dev->open_delay[i] > STEPCONFIG_MAX_OPENDLY) {
			dev_warn(&pdev->dev,
				 "chan %d: wrong open delay, truncated to 0x%lX\n",
				 chan, STEPCONFIG_MAX_OPENDLY);
			adc_dev->open_delay[i] = STEPCONFIG_MAX_OPENDLY;
		}

		if (adc_dev->sample_delay[i] > STEPCONFIG_MAX_SAMPLE) {
			dev_warn(&pdev->dev,
				 "chan %d: wrong sample delay, truncated to 0x%lX\n",
				 chan, STEPCONFIG_MAX_SAMPLE);
			adc_dev->sample_delay[i] = STEPCONFIG_MAX_SAMPLE;
		}
	}

	return 0;
}

static int tiadc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct tiadc_device *adc_dev;
	struct device_node *node = pdev->dev.of_node;
	int err;

	if (!node) {
		dev_err(&pdev->dev, "Could not find valid DT data.\n");
		return -EINVAL;
	}

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
	if (!indio_dev) {
		dev_err(&pdev->dev, "failed to allocate iio device\n");
		return -ENOMEM;
	}
	adc_dev = iio_priv(indio_dev);

	adc_dev->mfd_tscadc = ti_tscadc_dev_get(pdev);
	tiadc_parse_dt(pdev, adc_dev);

	indio_dev->name = dev_name(&pdev->dev);
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &tiadc_info;

	tiadc_step_config(indio_dev);
	tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
	mutex_init(&adc_dev->fifo1_lock);

	err = tiadc_channel_init(&pdev->dev, indio_dev, adc_dev->channels);
	if (err < 0)
		return err;

	err = tiadc_iio_buffered_hardware_setup(&pdev->dev, indio_dev,
						&tiadc_worker_h,
						&tiadc_irq_h,
						adc_dev->mfd_tscadc->irq,
						IRQF_SHARED,
						&tiadc_buffer_setup_ops);
	if (err)
		return err;

	err = iio_device_register(indio_dev);
	if (err)
		return err;

	platform_set_drvdata(pdev, indio_dev);

	err = tiadc_request_dma(pdev, adc_dev);
	if (err && err != -ENODEV) {
		dev_err_probe(&pdev->dev, err, "DMA request failed\n");
		goto err_dma;
	}

	return 0;

err_dma:
	iio_device_unregister(indio_dev);

	return err;
}

static int tiadc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	u32 step_en;

	if (dma->chan) {
		dma_free_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
				  dma->buf, dma->addr);
		dma_release_channel(dma->chan);
	}
	iio_device_unregister(indio_dev);

	step_en = get_adc_step_mask(adc_dev);
	am335x_tsc_se_clr(adc_dev->mfd_tscadc, step_en);

	return 0;
}
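/*
 * Suspend clears the step sequencer enable and sets the ADC power-down bit;
 * the cached step mask is kept so resume can reprogram the steps and rearm
 * any buffered capture that was active.
 */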
static int tiadc_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int idle;

	idle = tiadc_readl(adc_dev, REG_CTRL);
	idle &= ~(CNTRLREG_SSENB);
	tiadc_writel(adc_dev, REG_CTRL, idle | CNTRLREG_POWERDOWN);

	return 0;
}

static int tiadc_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int restore;

	/* Make sure ADC is powered up */
	restore = tiadc_readl(adc_dev, REG_CTRL);
	restore &= ~CNTRLREG_POWERDOWN;
	tiadc_writel(adc_dev, REG_CTRL, restore);

	tiadc_step_config(indio_dev);
	am335x_tsc_se_set_cache(adc_dev->mfd_tscadc,
				adc_dev->buffer_en_ch_steps);
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(tiadc_pm_ops, tiadc_suspend, tiadc_resume);

static const struct of_device_id ti_adc_dt_ids[] = {
	{ .compatible = "ti,am3359-adc", },
	{ .compatible = "ti,am4372-adc", },
	{ }
};
MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);

static struct platform_driver tiadc_driver = {
	.driver = {
		.name = "TI-am335x-adc",
		.pm = pm_sleep_ptr(&tiadc_pm_ops),
		.of_match_table = ti_adc_dt_ids,
	},
	.probe = tiadc_probe,
	.remove = tiadc_remove,
};
module_platform_driver(tiadc_driver);

MODULE_DESCRIPTION("TI ADC controller driver");
MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
MODULE_LICENSE("GPL");