/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18

/* RX registers, relative the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* bytes per Row, video specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14


#define TIMB_DMA_DESC_SIZE	8

struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_transfer_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[0];
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}

/* The channel array is allocated directly after struct timb_dma (see
 * td_probe), so step back from the channel to reach the device struct.
 */
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}

static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHRT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
		dma_desc, (unsigned long long)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0);	/* tran, valid */

	return 0;
}

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_DEV_TO_MEM) {

		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}

static void __td_finish(struct timb_dma_chan *td_chan)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd;
	struct timb_dma_desc		*td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_DEV_TO_MEM)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
	*/
	dma_cookie_complete(txd);
	td_chan->ongoing = false;

	callback = txd->callback;
	param = txd->callback_param;

	list_move(&td_desc->desc_node, &td_chan->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
				struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}

static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc)
		goto out;

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list)
		goto err;

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;

}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}

static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;
			else {
				dev_err(chan2dev(chan),
					"Couldn't allocate any descriptors\n");
				return -ENOMEM;
			}
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	dma_cookie_init(chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	enum dma_status ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	ret = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

	return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			/* return the descriptor so it is not leaked */
			td_desc_put(td_chan, td_desc);
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_MEM_TO_DEV);

	return &td_desc->txd;
}

static int td_terminate_all(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}


static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}


static int td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources	= td_alloc_chan_resources;
	td->dma.device_free_chan_resources	= td_free_chan_resources;
	td->dma.device_tx_status		= td_tx_status;
	td->dma.device_issue_pending		= td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_terminate_all = td_terminate_all;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		dma_cookie_init(&td_chan->chan);
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
			DMA_MEM_TO_DEV;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;

}

static int td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
	},
	.probe	= td_probe,
	.remove	= td_remove,
};

module_platform_driver(td_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);