// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Motorola PCAP2 as present in EZX phones
 *
 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/slab.h>

/*
 * Depth of the ADC request ring.  Must stay a power of two: the
 * head/tail indices below are wrapped with "& (PCAP_ADC_MAXQ - 1)".
 */
#define PCAP_ADC_MAXQ 8

/*
 * One queued ADC conversion: bank/channel selection plus the callback
 * that is invoked with (data, res[2]) once both channel results have
 * been read back.  Allocated in pcap_adc_async(), freed after the
 * callback runs.
 */
struct pcap_adc_request {
	u8 bank;
	u8 ch[2];
	u32 flags;
	void (*callback)(void *, u16[]);
	void *data;
};

/* Bridge used by pcap_adc_sync() to sleep until the async callback fires. */
struct pcap_adc_sync_request {
	u16 res[2];
	struct completion completion;
};

struct pcap_chip {
	struct spi_device *spi;

	/* IO */
	u32 buf;		/* single SPI tx/rx word, guarded by io_mutex */
	struct mutex io_mutex;

	/* IRQ */
	unsigned int irq_base;	/* first virtual irq number assigned to us */
	u32 msr;		/* cached copy of the PCAP interrupt mask */
	struct work_struct isr_work;
	struct work_struct msr_work;
	struct workqueue_struct *workqueue;

	/* ADC */
	struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
	u8 adc_head;		/* next request to complete */
	u8 adc_tail;		/* next free slot */
	struct mutex adc_mutex;
};

/* IO */
/*
 * Clock one 32-bit frame out to / in from the chip over SPI.
 * On success *data is overwritten with the word shifted back in.
 * pcap->buf (not a stack variable) is used as the transfer buffer —
 * presumably so the buffer is safe for the SPI core/DMA; TODO confirm.
 * Callers must hold io_mutex to serialize access to pcap->buf.
 * May sleep (spi_sync); returns 0 or a negative SPI error.
 */
static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
{
	struct spi_transfer t;
	struct spi_message m;
	int status;

	memset(&t, 0, sizeof(t));
	spi_message_init(&m);
	t.len = sizeof(u32);
	spi_message_add_tail(&t, &m);

	pcap->buf = *data;
	t.tx_buf = (u8 *) &pcap->buf;
	t.rx_buf = (u8 *) &pcap->buf;
	status = spi_sync(pcap->spi, &m);

	if (status == 0)
		*data = pcap->buf;

	return status;
}

/*
 * Write @value to PCAP register @reg_num.
 * The value is masked to the register payload bits and combined with
 * the write opcode and register address before being shifted out.
 * May sleep; returns 0 or a negative SPI error.
 */
int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
{
	int ret;

	mutex_lock(&pcap->io_mutex);
	value &= PCAP_REGISTER_VALUE_MASK;
	value |= PCAP_REGISTER_WRITE_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
	ret = ezx_pcap_putget(pcap, &value);
	mutex_unlock(&pcap->io_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_write);

/*
 * Read PCAP register @reg_num into *value.
 * *value is first loaded with the read opcode + address frame, then
 * replaced by the data clocked back from the chip.
 * May sleep; returns 0 or a negative SPI error.
 */
int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
{
	int ret;

	mutex_lock(&pcap->io_mutex);
	*value = PCAP_REGISTER_READ_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	ret = ezx_pcap_putget(pcap, value);
	mutex_unlock(&pcap->io_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_read);

/*
 * Read-modify-write helper: set the bits selected by @mask in register
 * @reg_num to @val.  Both SPI transfers happen under io_mutex, so the
 * update is atomic with respect to other register accesses through
 * this driver.  Returns 0 or a negative SPI error.
 */
int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
{
	int ret;
	u32 tmp = PCAP_REGISTER_READ_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	mutex_lock(&pcap->io_mutex);
	ret = ezx_pcap_putget(pcap, &tmp);
	if (ret)
		goto out_unlock;

	/* keep payload bits outside @mask, then splice in the new bits */
	tmp &= (PCAP_REGISTER_VALUE_MASK & ~mask);
	tmp |= (val & mask) | PCAP_REGISTER_WRITE_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	ret = ezx_pcap_putget(pcap, &tmp);
out_unlock:
	mutex_unlock(&pcap->io_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_set_bits);

/* IRQ */
/* Map a virtual irq number back to its PCAP interrupt bit position. */
int irq_to_pcap(struct pcap_chip *pcap, int irq)
{
	return irq - pcap->irq_base;
}
EXPORT_SYMBOL_GPL(irq_to_pcap);

/* Map a PCAP interrupt bit position to its virtual irq number. */
int pcap_to_irq(struct pcap_chip *pcap, int irq)
{
	return pcap->irq_base + irq;
}
EXPORT_SYMBOL_GPL(pcap_to_irq);

/*
 * irq_chip mask callback.  Only the cached mask (pcap->msr) is updated
 * here; pushing it to the hardware needs SPI I/O, which sleeps, so the
 * actual register write is deferred to msr_work on our workqueue.
 */
static void pcap_mask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr |= 1 << irq_to_pcap(pcap, d->irq);
	queue_work(pcap->workqueue, &pcap->msr_work);
}

/* irq_chip unmask callback; see pcap_mask_irq() for the deferral. */
static void pcap_unmask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq));
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static struct irq_chip pcap_irq_chip = {
	.name = "pcap",
	.irq_disable = pcap_mask_irq,	/* disable == mask */
	.irq_mask = pcap_mask_irq,
	.irq_unmask = pcap_unmask_irq,
};

/* Deferred write of the cached interrupt mask to the chip's MSR. */
static void pcap_msr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);

	ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
}

/*
 * Bottom half of the chained PCAP interrupt.  Reads MSR/ISR over SPI,
 * masks and acks the bits it is about to service, dispatches the
 * pending unmasked bits to their virtual irqs, then restores the
 * mask that the irq_chip callbacks maintain.  Loops for as long as
 * the PCAP interrupt GPIO stays asserted so no edge is lost.
 */
static void pcap_isr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
	struct pcap_platform_data *pdata = dev_get_platdata(&pcap->spi->dev);
	u32 msr, isr, int_sel, service;
	int irq;

	do {
		ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
		ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);

		/* We can't service/ack irqs that are assigned to port 2 */
		if (!(pdata->config & PCAP_SECOND_PORT)) {
			ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
			isr &= ~int_sel;
		}

		/* mask everything we will handle, then ack it */
		ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
		ezx_pcap_write(pcap, PCAP_REG_ISR, isr);

		/*
		 * NOTE(review): handlers invoked via generic_handle_irq()
		 * run with hard irqs disabled here, yet pcap_adc_irq()
		 * takes mutexes and does SPI I/O — verify this is safe on
		 * the target platform's SPI stack.
		 */
		local_irq_disable();
		service = isr & ~msr;
		for (irq = pcap->irq_base; service; service >>= 1, irq++) {
			if (service & 1)
				generic_handle_irq(irq);
		}
		local_irq_enable();
		/* restore the mask maintained by mask/unmask callbacks */
		ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
	} while (gpio_get_value(pdata->gpio));
}

/*
 * Chained handler for the GPIO irq the PCAP is wired to.  Register
 * access sleeps (SPI), so just ack the parent irq and defer the real
 * work to isr_work on our single-threaded workqueue.
 */
static void pcap_irq_handler(struct irq_desc *desc)
{
	struct pcap_chip *pcap = irq_desc_get_handler_data(desc);

	desc->irq_data.chip->irq_ack(&desc->irq_data);
	queue_work(pcap->workqueue, &pcap->isr_work);
}

/* ADC */
/*
 * Update only the touchscreen mode / low-power reference bits of the
 * ADC register, preserving every other bit.  These bits are likewise
 * preserved by pcap_adc_trigger() when it programs a conversion.
 */
void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits)
{
	u32 tmp;

	mutex_lock(&pcap->adc_mutex);
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	mutex_unlock(&pcap->adc_mutex);
}
EXPORT_SYMBOL_GPL(pcap_set_ts_bits);

/*
 * Clear the ADC enable and battery-current bits to save power.
 * Called with adc_mutex held (see pcap_adc_trigger()).
 */
static void pcap_disable_adc(struct pcap_chip *pcap)
{
	u32 tmp;

	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADEN|PCAP_ADC_BATT_I_ADC|PCAP_ADC_BATT_I_POLARITY);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
}

/*
 * Program and start the conversion for the request at the head of the
 * ADC queue, or power the ADC down when the queue is empty.  The
 * touchscreen/low-power bits are carried over from the current ADC
 * register contents (they belong to pcap_set_ts_bits()).
 */
static void pcap_adc_trigger(struct pcap_chip *pcap)
{
	u32 tmp;
	u8 head;

	mutex_lock(&pcap->adc_mutex);
	head = pcap->adc_head;
	if (!pcap->adc_queue[head]) {
		/* queue is empty, save power */
		pcap_disable_adc(pcap);
		mutex_unlock(&pcap->adc_mutex);
		return;
	}
	/* start conversion on requested bank, save TS_M bits */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;

	if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
		tmp |= PCAP_ADC_AD_SEL1;

	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	mutex_unlock(&pcap->adc_mutex);
	/* presumably the ASC write is what kicks off the conversion —
	 * confirm against the PCAP2 datasheet */
	ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
}

/*
 * ADCDONE interrupt handler: select the two requested channels, read
 * both results from ADR, pop the request at the head of the ring,
 * hand the results to its callback, free it, and start the next
 * queued conversion (if any).
 */
static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
{
	struct pcap_chip *pcap = _pcap;
	struct pcap_adc_request *req;
	u16 res[2];
	u32 tmp;

	mutex_lock(&pcap->adc_mutex);
	req = pcap->adc_queue[pcap->adc_head];

	/* spurious interrupt with nothing queued: warn and bail out */
	if (WARN(!req, "adc irq without pending request\n")) {
		mutex_unlock(&pcap->adc_mutex);
		return IRQ_HANDLED;
	}

	/* read requested channels results */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK);
	tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
	tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp);
	res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT;
	res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT;

	/* pop the ring; PCAP_ADC_MAXQ is a power of two, hence the mask */
	pcap->adc_queue[pcap->adc_head] = NULL;
	pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
	mutex_unlock(&pcap->adc_mutex);

	/* pass the results and release memory */
	req->callback(req->data, res);
	kfree(req);

	/* trigger next conversion (if any) on queue */
	pcap_adc_trigger(pcap);

	return IRQ_HANDLED;
}

/*
 * Queue an asynchronous two-channel ADC conversion on @bank.
 * @callback is invoked with (@data, res[2]) from the ADCDONE irq path
 * once the conversion completes; the request owns @callback/@data
 * until then.  Returns 0 on success, -ENOMEM on allocation failure,
 * or -EBUSY when the fixed-size ring is full.
 *
 * NOTE(review): @callback arrives as a void * but is stored into a
 * void (*)(void *, u16[]) function pointer — callers must pass a
 * function of exactly that type (pcap_adc_sync() does).
 */
int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
		void *callback, void *data)
{
	struct pcap_adc_request *req;

	/* This will be freed after we have a result */
	req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->bank = bank;
	req->flags = flags;
	req->ch[0] = ch[0];
	req->ch[1] = ch[1];
	req->callback = callback;
	req->data = data;

	mutex_lock(&pcap->adc_mutex);
	if (pcap->adc_queue[pcap->adc_tail]) {
		/* tail slot still occupied -> ring is full */
		mutex_unlock(&pcap->adc_mutex);
		kfree(req);
		return -EBUSY;
	}
	pcap->adc_queue[pcap->adc_tail] = req;
	pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
	mutex_unlock(&pcap->adc_mutex);

	/* start conversion */
	pcap_adc_trigger(pcap);

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_async);

/* Async completion callback used by pcap_adc_sync(): copy results
 * into the on-stack sync request and wake the waiter. */
static void pcap_adc_sync_cb(void *param, u16 res[])
{
	struct pcap_adc_sync_request *req = param;

	req->res[0] = res[0];
	req->res[1] = res[1];
	complete(&req->completion);
}

/*
 * Synchronous wrapper around pcap_adc_async(): queue the conversion
 * and sleep until both results have been copied into @res[2].
 * Returns 0 on success or the pcap_adc_async() error code.
 */
int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
		u16 res[])
{
	struct pcap_adc_sync_request sync_data;
	int ret;

	init_completion(&sync_data.completion);
	ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb,
			&sync_data);
	if (ret)
		return ret;
	wait_for_completion(&sync_data.completion);
	res[0] = sync_data.res[0];
	res[1] = sync_data.res[1];

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_sync);

/* subdevs */
/* device_for_each_child() helper: unregister one child subdevice. */
static int pcap_remove_subdev(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));
	return 0;
}

/*
 * Register one platform subdevice as a child of the SPI device.
 * On platform_device_add() failure the allocated pdev is dropped
 * with platform_device_put().  Returns 0 or a negative error.
 */
static int pcap_add_subdev(struct pcap_chip *pcap,
				struct pcap_subdev *subdev)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(subdev->name, subdev->id);
	if (!pdev)
		return -ENOMEM;

	pdev->dev.parent = &pcap->spi->dev;
	pdev->dev.platform_data = subdev->platform_data;

	ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);

	return ret;
}

/*
 * Tear down in reverse probe order: subdevices, queued ADC requests,
 * the virtual irq mappings, then the workqueue.
 * NOTE(review): the chained handler installed on spi->irq in probe is
 * not detached here — verify whether that is required on this platform.
 */
static int ezx_pcap_remove(struct spi_device *spi)
{
	struct pcap_chip *pcap = spi_get_drvdata(spi);
	int i;

	/* remove all registered subdevs */
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);

	/* cleanup ADC */
	mutex_lock(&pcap->adc_mutex);
	for (i = 0; i < PCAP_ADC_MAXQ; i++)
		kfree(pcap->adc_queue[i]);
	mutex_unlock(&pcap->adc_mutex);

	/* cleanup irqchip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);

	destroy_workqueue(pcap->workqueue);

	return 0;
}

/*
 * Probe: set up SPI (32-bit words, chip-select polarity from platform
 * data), create the workqueue, install the PCAP irq_chip on a range
 * of virtual irqs, mask/ack all chip interrupts, chain ourselves onto
 * the interrupt GPIO, claim the ADCDONE irq and finally register the
 * platform subdevices.  Requires platform data.
 */
static int ezx_pcap_probe(struct spi_device *spi)
{
	struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
	struct pcap_chip *pcap;
	int i, adc_irq;
	int ret = -ENODEV;

	/* platform data is required */
	if (!pdata)
		goto ret;

	pcap = devm_kzalloc(&spi->dev, sizeof(*pcap), GFP_KERNEL);
	if (!pcap) {
		ret = -ENOMEM;
		goto ret;
	}

	mutex_init(&pcap->io_mutex);
	mutex_init(&pcap->adc_mutex);
	INIT_WORK(&pcap->isr_work, pcap_isr_work);
	INIT_WORK(&pcap->msr_work, pcap_msr_work);
	spi_set_drvdata(spi, pcap);

	/* setup spi */
	spi->bits_per_word = 32;
	spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
	ret = spi_setup(spi);
	if (ret)
		goto ret;

	pcap->spi = spi;

	/* setup irq */
	pcap->irq_base = pdata->irq_base;
	pcap->workqueue = create_singlethread_workqueue("pcapd");
	if (!pcap->workqueue) {
		ret = -ENOMEM;
		dev_err(&spi->dev, "can't create pcap thread\n");
		goto ret;
	}

	/* redirect interrupts to AP, except adcdone2 */
	if (!(pdata->config & PCAP_SECOND_PORT))
		ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
					(1 << PCAP_IRQ_ADCDONE2));

	/* setup irq chip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
		irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
		irq_set_chip_data(i, pcap);
		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
	}

	/* mask/ack all PCAP interrupts */
	ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
	ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
	pcap->msr = PCAP_MASK_ALL_INTERRUPT;

	irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
	irq_set_chained_handler_and_data(spi->irq, pcap_irq_handler, pcap);
	irq_set_irq_wake(spi->irq, 1);

	/* ADC: ADCDONE2 is ours when PCAP sits on the second port */
	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
					PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);

	ret = devm_request_irq(&spi->dev, adc_irq, pcap_adc_irq, 0, "ADC",
				pcap);
	if (ret)
		goto free_irqchip;

	/* setup subdevs */
	for (i = 0; i < pdata->num_subdevs; i++) {
		ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
		if (ret)
			goto remove_subdevs;
	}

	/* board specific quirks */
	if (pdata->init)
		pdata->init(pcap);

	return 0;

remove_subdevs:
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
free_irqchip:
	/* NOTE(review): the chained handler set on spi->irq above is not
	 * undone on these error paths — verify. */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
	destroy_workqueue(pcap->workqueue);
ret:
	return ret;
}

static struct spi_driver ezxpcap_driver = {
	.probe	= ezx_pcap_probe,
	.remove = ezx_pcap_remove,
	.driver = {
		.name	= "ezx-pcap",
	},
};

static int __init ezx_pcap_init(void)
{
	return spi_register_driver(&ezxpcap_driver);
}

static void __exit ezx_pcap_exit(void)
{
	spi_unregister_driver(&ezxpcap_driver);
}

/* subsys_initcall: register earlier than module_init so the PCAP core
 * is available before its subdevices probe — presumably; confirm. */
subsys_initcall(ezx_pcap_init);
module_exit(ezx_pcap_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
MODULE_ALIAS("spi:ezx-pcap");