// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Audio DMA Controller (ADMAC) on t8103 (M1) and other Apple chips
 *
 * Copyright (C) The Asahi Linux Contributors
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

#define NCHANNELS_MAX	64
#define IRQ_NOUTPUTS	4

#define RING_WRITE_SLOT		GENMASK(1, 0)
#define RING_READ_SLOT		GENMASK(5, 4)
#define RING_FULL		BIT(9)
#define RING_EMPTY		BIT(8)
#define RING_ERR		BIT(10)

#define STATUS_DESC_DONE	BIT(0)
#define STATUS_ERR		BIT(6)

#define FLAG_DESC_NOTIFY	BIT(16)

#define REG_TX_START		0x0000
#define REG_TX_STOP		0x0004
#define REG_RX_START		0x0008
#define REG_RX_STOP		0x000c

#define REG_CHAN_CTL(ch)	(0x8000 + (ch) * 0x200)
#define REG_CHAN_CTL_RST_RINGS	BIT(0)

#define REG_DESC_RING(ch)	(0x8070 + (ch) * 0x200)
#define REG_REPORT_RING(ch)	(0x8074 + (ch) * 0x200)

#define REG_RESIDUE(ch)		(0x8064 + (ch) * 0x200)

#define REG_BUS_WIDTH(ch)	(0x8040 + (ch) * 0x200)

#define BUS_WIDTH_8BIT		0x00
#define BUS_WIDTH_16BIT		0x01
#define BUS_WIDTH_32BIT		0x02
#define BUS_WIDTH_FRAME_2_WORDS	0x10
#define BUS_WIDTH_FRAME_4_WORDS	0x20

#define CHAN_BUFSIZE		0x8000

#define REG_CHAN_FIFOCTL(ch)	(0x8054 + (ch) * 0x200)
#define CHAN_FIFOCTL_LIMIT	GENMASK(31, 16)
#define CHAN_FIFOCTL_THRESHOLD	GENMASK(15, 0)

#define REG_DESC_WRITE(ch)	(0x10000 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
#define REG_REPORT_READ(ch)	(0x10100 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)

#define REG_TX_INTSTATE(idx)		(0x0030 + (idx) * 4)
#define REG_RX_INTSTATE(idx)		(0x0040 + (idx) * 4)
#define REG_CHAN_INTSTATUS(ch, idx)	(0x8010 + (ch) * 0x200 + (idx) * 4)
#define REG_CHAN_INTMASK(ch, idx)	(0x8020 + (ch) * 0x200 + (idx) * 4)
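
/*
 * The hardware is not publicly documented; the picture below is the one this
 * driver relies on. Each channel appears to have a small (assumed four-slot)
 * descriptor ring and report ring: descriptors are pushed as four consecutive
 * 32-bit writes to REG_DESC_WRITE, completion reports are popped as four
 * 32-bit reads from REG_REPORT_READ, and REG_DESC_RING/REG_REPORT_RING expose
 * ring occupancy through RING_WRITE_SLOT, RING_READ_SLOT, RING_FULL and
 * RING_EMPTY. Even-numbered channels transmit (mem-to-dev) and odd-numbered
 * channels receive (dev-to-mem).
 */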

struct admac_data;
struct admac_tx;

struct admac_chan {
	unsigned int no;
	struct admac_data *host;
	struct dma_chan chan;
	struct tasklet_struct tasklet;

	spinlock_t lock;
	struct admac_tx *current_tx;
	int nperiod_acks;

	/*
	 * We maintain a 'submitted' and 'issued' list mainly for interface
	 * correctness. Typical use of the driver (per channel) will be
	 * prepping, submitting and issuing a single cyclic transaction which
	 * will stay current until terminate_all is called.
	 */
	struct list_head submitted;
	struct list_head issued;

	struct list_head to_free;
};

struct admac_data {
	struct dma_device dma;
	struct device *dev;
	void __iomem *base;
	struct reset_control *rstc;

	int irq;
	int irq_index;
	int nchannels;
	struct admac_chan channels[];
};

struct admac_tx {
	struct dma_async_tx_descriptor tx;
	bool cyclic;
	dma_addr_t buf_addr;
	dma_addr_t buf_end;
	size_t buf_len;
	size_t period_len;

	size_t submitted_pos;
	size_t reclaimed_pos;

	struct list_head node;
};

static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val)
{
	void __iomem *addr = ad->base + reg;
	u32 curr = readl_relaxed(addr);

	writel_relaxed((curr & ~mask) | (val & mask), addr);
}

static struct admac_chan *to_admac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct admac_chan, chan);
}

static struct admac_tx *to_admac_tx(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct admac_tx, tx);
}

static enum dma_transfer_direction admac_chan_direction(int channo)
{
	/* Channel directions are hardwired */
	return (channo & 1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
}

static dma_cookie_t admac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct admac_tx *adtx = to_admac_tx(tx);
	struct admac_chan *adchan = to_admac_chan(tx->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&adchan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&adtx->node, &adchan->submitted);
	spin_unlock_irqrestore(&adchan->lock, flags);

	return cookie;
}

static int admac_desc_free(struct dma_async_tx_descriptor *tx)
{
	kfree(to_admac_tx(tx));

	return 0;
}

static struct dma_async_tx_descriptor *admac_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct admac_chan *adchan = container_of(chan, struct admac_chan, chan);
	struct admac_tx *adtx;

	if (direction != admac_chan_direction(adchan->no))
		return NULL;

	adtx = kzalloc(sizeof(*adtx), GFP_NOWAIT);
	if (!adtx)
		return NULL;

	adtx->cyclic = true;

	adtx->buf_addr = buf_addr;
	adtx->buf_len = buf_len;
	adtx->buf_end = buf_addr + buf_len;
	adtx->period_len = period_len;

	adtx->submitted_pos = 0;
	adtx->reclaimed_pos = 0;

	dma_async_tx_descriptor_init(&adtx->tx, chan);
	adtx->tx.tx_submit = admac_tx_submit;
	adtx->tx.desc_free = admac_desc_free;

	return &adtx->tx;
}
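
/*
 * A note on positions: submitted_pos and reclaimed_pos are byte offsets into
 * the cyclic buffer kept modulo twice the buffer length, so that a full
 * buffer's worth of in-flight descriptors can be distinguished from none at
 * all. The descriptor pushed below is, as far as observed behaviour tells,
 * four 32-bit words: address low, address high, length in bytes, and flags,
 * where FLAG_DESC_NOTIFY asks the controller to queue a completion report.
 */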

/*
 * Write one hardware descriptor for a dmaengine cyclic transaction.
 */
static void admac_cyclic_write_one_desc(struct admac_data *ad, int channo,
					struct admac_tx *tx)
{
	dma_addr_t addr;

	addr = tx->buf_addr + (tx->submitted_pos % tx->buf_len);

	/* If this happens, we have buggy code */
	WARN_ON_ONCE(addr + tx->period_len > tx->buf_end);

	dev_dbg(ad->dev, "ch%d descriptor: addr=0x%pad len=0x%zx flags=0x%lx\n",
		channo, &addr, tx->period_len, FLAG_DESC_NOTIFY);

	writel_relaxed(lower_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(upper_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(tx->period_len, ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(FLAG_DESC_NOTIFY, ad->base + REG_DESC_WRITE(channo));

	tx->submitted_pos += tx->period_len;
	tx->submitted_pos %= 2 * tx->buf_len;
}

/*
 * Write all the hardware descriptors for a dmaengine cyclic
 * transaction that there is space for.
 */
static void admac_cyclic_write_desc(struct admac_data *ad, int channo,
				    struct admac_tx *tx)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_FULL)
			break;
		admac_cyclic_write_one_desc(ad, channo, tx);
	}
}

static int admac_ring_noccupied_slots(int ringval)
{
	int wrslot = FIELD_GET(RING_WRITE_SLOT, ringval);
	int rdslot = FIELD_GET(RING_READ_SLOT, ringval);

	if (wrslot != rdslot) {
		return (wrslot + 4 - rdslot) % 4;
	} else {
		WARN_ON((ringval & (RING_FULL | RING_EMPTY)) == 0);

		if (ringval & RING_FULL)
			return 4;
		else
			return 0;
	}
}

/*
 * Read from hardware the residue of a cyclic dmaengine transaction.
 */
static u32 admac_cyclic_read_residue(struct admac_data *ad, int channo,
				     struct admac_tx *adtx)
{
	u32 ring1, ring2;
	u32 residue1, residue2;
	int nreports;
	size_t pos;

	ring1 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
	residue1 = readl_relaxed(ad->base + REG_RESIDUE(channo));
	ring2 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
	residue2 = readl_relaxed(ad->base + REG_RESIDUE(channo));

	if (residue2 > residue1) {
		/*
		 * Controller must have loaded next descriptor between
		 * the two residue reads
		 */
		nreports = admac_ring_noccupied_slots(ring1) + 1;
	} else {
		/* No descriptor load between the two reads, ring2 is safe to use */
		nreports = admac_ring_noccupied_slots(ring2);
	}

	pos = adtx->reclaimed_pos + adtx->period_len * (nreports + 1) - residue2;

	return adtx->buf_len - pos % adtx->buf_len;
}

static enum dma_status admac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_data *ad = adchan->host;
	struct admac_tx *adtx;

	enum dma_status ret;
	size_t residue;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&adchan->lock, flags);
	adtx = adchan->current_tx;

	if (adtx && adtx->tx.cookie == cookie) {
		ret = DMA_IN_PROGRESS;
		residue = admac_cyclic_read_residue(ad, adchan->no, adtx);
	} else {
		ret = DMA_IN_PROGRESS;
		residue = 0;
		list_for_each_entry(adtx, &adchan->issued, node) {
			if (adtx->tx.cookie == cookie) {
				residue = adtx->buf_len;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&adchan->lock, flags);

	dma_set_residue(txstate, residue);
	return ret;
}
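
/*
 * Channels come in TX/RX pairs sharing an index: the global start/stop and
 * interrupt-state registers carry one bit per pair, hence the (channel
 * number / 2) below, with the REG_TX_* instances serving even (TX) channels
 * and the REG_RX_* instances serving odd (RX) ones.
 */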

static void admac_start_chan(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	u32 startbit = 1 << (adchan->no / 2);

	writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
		       ad->base + REG_CHAN_INTSTATUS(adchan->no, ad->irq_index));
	writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
		       ad->base + REG_CHAN_INTMASK(adchan->no, ad->irq_index));

	switch (admac_chan_direction(adchan->no)) {
	case DMA_MEM_TO_DEV:
		writel_relaxed(startbit, ad->base + REG_TX_START);
		break;
	case DMA_DEV_TO_MEM:
		writel_relaxed(startbit, ad->base + REG_RX_START);
		break;
	default:
		break;
	}
	dev_dbg(adchan->host->dev, "ch%d start\n", adchan->no);
}

static void admac_stop_chan(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	u32 stopbit = 1 << (adchan->no / 2);

	switch (admac_chan_direction(adchan->no)) {
	case DMA_MEM_TO_DEV:
		writel_relaxed(stopbit, ad->base + REG_TX_STOP);
		break;
	case DMA_DEV_TO_MEM:
		writel_relaxed(stopbit, ad->base + REG_RX_STOP);
		break;
	default:
		break;
	}
	dev_dbg(adchan->host->dev, "ch%d stop\n", adchan->no);
}

static void admac_reset_rings(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;

	writel_relaxed(REG_CHAN_CTL_RST_RINGS,
		       ad->base + REG_CHAN_CTL(adchan->no));
	writel_relaxed(0, ad->base + REG_CHAN_CTL(adchan->no));
}

static void admac_start_current_tx(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	int ch = adchan->no;

	admac_reset_rings(adchan);
	writel_relaxed(0, ad->base + REG_CHAN_CTL(ch));

	admac_cyclic_write_one_desc(ad, ch, adchan->current_tx);
	admac_start_chan(adchan);
	admac_cyclic_write_desc(ad, ch, adchan->current_tx);
}

static void admac_issue_pending(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_tx *tx;
	unsigned long flags;

	spin_lock_irqsave(&adchan->lock, flags);
	list_splice_tail_init(&adchan->submitted, &adchan->issued);
	if (!list_empty(&adchan->issued) && !adchan->current_tx) {
		tx = list_first_entry(&adchan->issued, struct admac_tx, node);
		list_del(&tx->node);

		adchan->current_tx = tx;
		adchan->nperiod_acks = 0;
		admac_start_current_tx(adchan);
	}
	spin_unlock_irqrestore(&adchan->lock, flags);
}

static int admac_pause(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	admac_stop_chan(adchan);

	return 0;
}

static int admac_resume(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	admac_start_chan(adchan);

	return 0;
}

static int admac_terminate_all(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&adchan->lock, flags);
	admac_stop_chan(adchan);
	admac_reset_rings(adchan);

	adchan->current_tx = NULL;
	/*
	 * Descriptors can only be freed after the tasklet
	 * has been killed (in admac_synchronize).
	 */
	list_splice_tail_init(&adchan->submitted, &adchan->to_free);
	list_splice_tail_init(&adchan->issued, &adchan->to_free);
	spin_unlock_irqrestore(&adchan->lock, flags);

	return 0;
}

static void admac_synchronize(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_tx *adtx, *_adtx;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&adchan->lock, flags);
	list_splice_tail_init(&adchan->to_free, &head);
	spin_unlock_irqrestore(&adchan->lock, flags);

	tasklet_kill(&adchan->tasklet);

	list_for_each_entry_safe(adtx, _adtx, &head, node) {
		list_del(&adtx->node);
		admac_desc_free(&adtx->tx);
	}
}

static int admac_alloc_chan_resources(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	dma_cookie_init(&adchan->chan);
	return 0;
}

static void admac_free_chan_resources(struct dma_chan *chan)
{
	admac_terminate_all(chan);
	admac_synchronize(chan);
}
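
/*
 * Consumers address a channel with a single specifier cell holding the
 * channel index (even for TX, odd for RX). Purely as an illustration (the
 * apple,admac devicetree binding is authoritative, and the indices here are
 * made up), a consumer node could carry:
 *
 *	dmas = <&admac 2>, <&admac 3>;
 *	dma-names = "tx", "rx";
 */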

static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct admac_data *ad = (struct admac_data *) ofdma->of_dma_data;
	unsigned int index;

	if (dma_spec->args_count != 1)
		return NULL;

	index = dma_spec->args[0];

	if (index >= ad->nchannels) {
		dev_err(ad->dev, "channel index %u out of bounds\n", index);
		return NULL;
	}

	return &ad->channels[index].chan;
}

static int admac_drain_reports(struct admac_data *ad, int channo)
{
	int count;

	for (count = 0; count < 4; count++) {
		u32 countval_hi, countval_lo, unk1, flags;

		if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_EMPTY)
			break;

		countval_lo = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		countval_hi = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		unk1 = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		flags = readl_relaxed(ad->base + REG_REPORT_READ(channo));

		dev_dbg(ad->dev, "ch%d report: countval=0x%llx unk1=0x%x flags=0x%x\n",
			channo, ((u64) countval_hi) << 32 | countval_lo, unk1, flags);
	}

	return count;
}

static void admac_handle_status_err(struct admac_data *ad, int channo)
{
	bool handled = false;

	if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_ERR) {
		writel_relaxed(RING_ERR, ad->base + REG_DESC_RING(channo));
		dev_err_ratelimited(ad->dev, "ch%d descriptor ring error\n", channo);
		handled = true;
	}

	if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_ERR) {
		writel_relaxed(RING_ERR, ad->base + REG_REPORT_RING(channo));
		dev_err_ratelimited(ad->dev, "ch%d report ring error\n", channo);
		handled = true;
	}

	if (unlikely(!handled)) {
		dev_err(ad->dev, "ch%d unknown error, masking errors as cause of IRQs\n", channo);
		admac_modify(ad, REG_CHAN_INTMASK(channo, ad->irq_index),
			     STATUS_ERR, 0);
	}
}
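
/*
 * DESC_DONE handling in short: acknowledge the interrupt, drain whatever
 * completion reports the hardware has queued, credit that many periods to
 * reclaimed_pos and nperiod_acks, top the descriptor ring back up, and let
 * the tasklet invoke the client's callback once per completed period.
 */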

static void admac_handle_status_desc_done(struct admac_data *ad, int channo)
{
	struct admac_chan *adchan = &ad->channels[channo];
	unsigned long flags;
	int nreports;

	writel_relaxed(STATUS_DESC_DONE,
		       ad->base + REG_CHAN_INTSTATUS(channo, ad->irq_index));

	spin_lock_irqsave(&adchan->lock, flags);
	nreports = admac_drain_reports(ad, channo);

	if (adchan->current_tx) {
		struct admac_tx *tx = adchan->current_tx;

		adchan->nperiod_acks += nreports;
		tx->reclaimed_pos += nreports * tx->period_len;
		tx->reclaimed_pos %= 2 * tx->buf_len;

		admac_cyclic_write_desc(ad, channo, tx);
		tasklet_schedule(&adchan->tasklet);
	}
	spin_unlock_irqrestore(&adchan->lock, flags);
}

static void admac_handle_chan_int(struct admac_data *ad, int no)
{
	u32 cause = readl_relaxed(ad->base + REG_CHAN_INTSTATUS(no, ad->irq_index));

	if (cause & STATUS_ERR)
		admac_handle_status_err(ad, no);

	if (cause & STATUS_DESC_DONE)
		admac_handle_status_desc_done(ad, no);
}

static irqreturn_t admac_interrupt(int irq, void *devid)
{
	struct admac_data *ad = devid;
	u32 rx_intstate, tx_intstate;
	int i;

	rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
	tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));

	if (!tx_intstate && !rx_intstate)
		return IRQ_NONE;

	for (i = 0; i < ad->nchannels; i += 2) {
		if (tx_intstate & 1)
			admac_handle_chan_int(ad, i);
		tx_intstate >>= 1;
	}

	for (i = 1; i < ad->nchannels; i += 2) {
		if (rx_intstate & 1)
			admac_handle_chan_int(ad, i);
		rx_intstate >>= 1;
	}

	return IRQ_HANDLED;
}

static void admac_chan_tasklet(struct tasklet_struct *t)
{
	struct admac_chan *adchan = from_tasklet(adchan, t, tasklet);
	struct admac_tx *adtx;
	struct dmaengine_desc_callback cb;
	struct dmaengine_result tx_result;
	int nacks;

	spin_lock_irq(&adchan->lock);
	adtx = adchan->current_tx;
	nacks = adchan->nperiod_acks;
	adchan->nperiod_acks = 0;
	spin_unlock_irq(&adchan->lock);

	if (!adtx || !nacks)
		return;

	tx_result.result = DMA_TRANS_NOERROR;
	tx_result.residue = 0;

	dmaengine_desc_get_callback(&adtx->tx, &cb);
	while (nacks--)
		dmaengine_desc_callback_invoke(&cb, &tx_result);
}

static int admac_device_config(struct dma_chan *chan,
			       struct dma_slave_config *config)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_data *ad = adchan->host;
	bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
	int wordsize = 0;
	u32 bus_width = 0;

	switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		wordsize = 1;
		bus_width |= BUS_WIDTH_8BIT;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		wordsize = 2;
		bus_width |= BUS_WIDTH_16BIT;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		wordsize = 4;
		bus_width |= BUS_WIDTH_32BIT;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * We take port_window_size to be the number of words in a frame.
	 *
	 * The controller has some means of out-of-band signalling, to the
	 * peripheral, of a word's position in a frame. That's where the
	 * importance of this control comes from.
	 */
	switch (is_tx ? config->dst_port_window_size : config->src_port_window_size) {
	case 0 ... 1:
		break;
	case 2:
		bus_width |= BUS_WIDTH_FRAME_2_WORDS;
		break;
	case 4:
		bus_width |= BUS_WIDTH_FRAME_4_WORDS;
		break;
	default:
		return -EINVAL;
	}

	writel_relaxed(bus_width, ad->base + REG_BUS_WIDTH(adchan->no));

	/*
	 * By FIFOCTL_LIMIT we seem to set the maximal number of bytes allowed to be
	 * held in controller's per-channel FIFO. Transfers seem to be triggered
	 * around the time FIFO occupancy touches FIFOCTL_THRESHOLD.
	 *
	 * The numbers we set are more or less arbitrary.
	 */
	writel_relaxed(FIELD_PREP(CHAN_FIFOCTL_LIMIT, 0x30 * wordsize)
		       | FIELD_PREP(CHAN_FIFOCTL_THRESHOLD, 0x18 * wordsize),
		       ad->base + REG_CHAN_FIFOCTL(adchan->no));

	return 0;
}

static int admac_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct admac_data *ad;
	struct dma_device *dma;
	int nchannels;
	int err, irq, i;

	err = of_property_read_u32(np, "dma-channels", &nchannels);
	if (err || nchannels > NCHANNELS_MAX) {
		dev_err(&pdev->dev, "missing or invalid dma-channels property\n");
		return -EINVAL;
	}

	ad = devm_kzalloc(&pdev->dev, struct_size(ad, channels, nchannels), GFP_KERNEL);
	if (!ad)
		return -ENOMEM;

	platform_set_drvdata(pdev, ad);
	ad->dev = &pdev->dev;
	ad->nchannels = nchannels;

	/*
	 * The controller has 4 IRQ outputs. Try them all until
	 * we find one we can use.
	 */
	for (i = 0; i < IRQ_NOUTPUTS; i++) {
		irq = platform_get_irq_optional(pdev, i);
		if (irq >= 0) {
			ad->irq_index = i;
			break;
		}
	}

	if (irq < 0)
		return dev_err_probe(&pdev->dev, irq, "no usable interrupt\n");
	ad->irq = irq;

	ad->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ad->base))
		return dev_err_probe(&pdev->dev, PTR_ERR(ad->base),
				     "unable to obtain MMIO resource\n");

	ad->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
	if (IS_ERR(ad->rstc))
		return PTR_ERR(ad->rstc);

	dma = &ad->dma;

	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);

	dma->dev = &pdev->dev;
	dma->device_alloc_chan_resources = admac_alloc_chan_resources;
	dma->device_free_chan_resources = admac_free_chan_resources;
	dma->device_tx_status = admac_tx_status;
	dma->device_issue_pending = admac_issue_pending;
	dma->device_terminate_all = admac_terminate_all;
	dma->device_synchronize = admac_synchronize;
	dma->device_prep_dma_cyclic = admac_prep_dma_cyclic;
	dma->device_config = admac_device_config;
	dma->device_pause = admac_pause;
	dma->device_resume = admac_resume;

	dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);

	INIT_LIST_HEAD(&dma->channels);
	for (i = 0; i < nchannels; i++) {
		struct admac_chan *adchan = &ad->channels[i];

		adchan->host = ad;
		adchan->no = i;
		adchan->chan.device = &ad->dma;
		spin_lock_init(&adchan->lock);
		INIT_LIST_HEAD(&adchan->submitted);
		INIT_LIST_HEAD(&adchan->issued);
		INIT_LIST_HEAD(&adchan->to_free);
		list_add_tail(&adchan->chan.device_node, &dma->channels);
		tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
	}

	err = reset_control_reset(ad->rstc);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "unable to trigger reset\n");

	err = request_irq(irq, admac_interrupt, 0, dev_name(&pdev->dev), ad);
	if (err) {
		dev_err_probe(&pdev->dev, err,
			      "unable to register interrupt\n");
		goto free_reset;
	}

	err = dma_async_device_register(&ad->dma);
	if (err) {
		dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
		goto free_irq;
	}

	err = of_dma_controller_register(pdev->dev.of_node,
					 admac_dma_of_xlate, ad);
	if (err) {
		dma_async_device_unregister(&ad->dma);
		dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
		goto free_irq;
	}

	return 0;

free_irq:
	free_irq(ad->irq, ad);
free_reset:
	reset_control_rearm(ad->rstc);
	return err;
}

static int admac_remove(struct platform_device *pdev)
{
	struct admac_data *ad = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&ad->dma);
	free_irq(ad->irq, ad);
	reset_control_rearm(ad->rstc);

	return 0;
}

static const struct of_device_id admac_of_match[] = {
	{ .compatible = "apple,admac", },
	{ }
};
MODULE_DEVICE_TABLE(of, admac_of_match);

static struct platform_driver apple_admac_driver = {
	.driver = {
		.name = "apple-admac",
		.of_match_table = admac_of_match,
	},
	.probe = admac_probe,
	.remove = admac_remove,
};
module_platform_driver(apple_admac_driver);

MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
MODULE_DESCRIPTION("Driver for Audio DMA Controller (ADMAC) on Apple SoCs");
MODULE_LICENSE("GPL");