/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18

/* RX registers, relative the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* bytes per Row, video specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14


#define TIMB_DMA_DESC_SIZE	8

struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_transfer_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[0];
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}

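/*
 * The channels[] array is allocated immediately after struct timb_dma (see
 * the kzalloc() in td_probe()), so the owning struct timb_dma can be
 * recovered from a channel pointer with plain pointer arithmetic, using the
 * channel id as the index into that array.
 */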
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}

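/*
 * In-memory hardware descriptor layout, as programmed by td_fill_desc()
 * below (TIMB_DMA_DESC_SIZE bytes per element):
 *
 *	byte 0:    control; 0x01 and 0x20 are always set ("valid" and "tran"
 *	           per the comment in td_fill_desc()), 0x02 marks the last
 *	           element of a transfer.  The exact hardware bit names are
 *	           not documented here and are inferred from this driver only.
 *	byte 1:    always written as zero.
 *	bytes 2-3: transfer length in bytes, little endian (at most USHRT_MAX,
 *	           must be a multiple of 4).
 *	bytes 4-7: 32-bit DMA bus address of the data, little endian.
 */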
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHRT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
		dma_desc, (unsigned long long)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_DEV_TO_MEM) {

		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}

static void __td_finish(struct timb_dma_chan *td_chan)
{
	struct dmaengine_desc_callback cb;
	struct dma_async_tx_descriptor *txd;
	struct timb_dma_desc *td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_DEV_TO_MEM)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
	*/
	dma_cookie_complete(txd);
	td_chan->ongoing = false;

	dmaengine_desc_get_callback(txd, &cb);

	list_move(&td_desc->desc_node, &td_chan->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	dmaengine_desc_callback_invoke(&cb, NULL);
}

static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
				struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}

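/*
 * Descriptor flow: td_desc_get() takes an ACKed descriptor off free_list,
 * td_tx_submit() puts it on active_list (starting the hardware if the
 * channel is idle) or on queue, and __td_finish() moves it back to
 * free_list once the transfer completes.  td_terminate_all() moves queued
 * descriptors straight back to free_list.
 */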
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc)
		goto out;

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list)
		goto err;

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}

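/*
 * Each channel pre-allocates td_chan->descs software descriptors.  Every
 * descriptor owns a buffer of desc_elems * TIMB_DMA_DESC_SIZE bytes that
 * the controller fetches hardware descriptors from; the buffer is
 * DMA-mapped once at allocation time (td_alloc_init_desc()) and only
 * synced to the device (see td_prep_slave_sg()) before each transfer.
 */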
static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;
			else {
				dev_err(chan2dev(chan),
					"Couldn't allocate any descriptors\n");
				return -ENOMEM;
			}
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	dma_cookie_init(chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	enum dma_status ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	ret = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

	return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	return &td_desc->txd;
}

static int td_terminate_all(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

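/*
 * Interrupt handling is split in two: td_irq() only checks IPR, masks all
 * channel interrupts and schedules the tasklet.  td_tasklet() acks the
 * pending bits, completes the active descriptor and starts the next queued
 * one for each signalled channel, and finally rebuilds IER from the
 * channels that still have an interrupt-requesting descriptor in flight
 * (__td_ier_mask()).
 */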
static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

static int td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_tx_status = td_tx_status;
	td->dma.device_issue_pending = td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_terminate_all = td_terminate_all;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;
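
		/*
		 * Channel i belongs to DMA instance i/2 in the register
		 * file; per the instance offsets above, channel 0 uses the
		 * RX registers at 0x00, channel 1 the TX registers at 0x18,
		 * channel 2 the RX registers at 0x40, channel 3 the TX
		 * registers at 0x58, and so on (see the membase calculation
		 * below).
		 */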

		/* even channels are RX, odd are TX */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		dma_cookie_init(&td_chan->chan);
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
			DMA_MEM_TO_DEV;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}

static int td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
	},
	.probe	= td_probe,
	.remove	= td_remove,
};

module_platform_driver(td_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);
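
/*
 * Illustrative only, not part of the driver: a timberdale client would
 * normally reach this engine through the generic dmaengine slave API.  A
 * minimal sketch, with "my_filter", "my_param" and "my_callback" as
 * placeholder names, could look roughly like this:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_param);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	desc->callback = my_callback;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */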