/*
 * BCM2835 DMA engine support
 *
 * This driver only supports cyclic DMA transfers
 * as needed for the I2S module.
 *
 * Author: Florian Meier <florian.meier@koalo.de>
 *         Copyright 2013
 *
 * Based on
 *	OMAP DMAengine support by Russell King
 *
 *	BCM2708 DMA Driver
 *	Copyright (C) 2010 Broadcom
 *
 *	Raspberry Pi PCM I2S ALSA Driver
 *	Copyright (c) by Phil Poole 2013
 *
 *	MARVELL MMP Peripheral DMA Driver
 *	Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

struct bcm2835_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	void __iomem *base;
	struct device_dma_parameters dma_parms;
};

/*
 * In-memory DMA control block; the layout is fixed by the hardware,
 * which reads these fields directly once the block's bus address is
 * written to the channel's control block address register.
 */
struct bcm2835_dma_cb {
	uint32_t info;
	uint32_t src;
	uint32_t dst;
	uint32_t length;
	uint32_t stride;
	uint32_t next;
	uint32_t pad[2];
};

/* CPU and bus addresses of a control block allocated from the channel pool */
struct bcm2835_cb_entry {
	struct bcm2835_dma_cb *cb;
	dma_addr_t paddr;
};

struct bcm2835_chan {
	struct virt_dma_chan vc;
	struct list_head node;

	struct dma_slave_config cfg;
	bool cyclic;
	unsigned int dreq;

	int ch;
	struct bcm2835_desc *desc;
	struct dma_pool *cb_pool;

	void __iomem *chan_base;
	int irq_number;
};

struct bcm2835_desc {
	struct bcm2835_chan *c;
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;

	struct bcm2835_cb_entry *cb_list;

	unsigned int frames;
	size_t size;
};

#define BCM2835_DMA_CS		0x00
#define BCM2835_DMA_ADDR	0x04
#define BCM2835_DMA_SOURCE_AD	0x0c
#define BCM2835_DMA_DEST_AD	0x10
#define BCM2835_DMA_NEXTCB	0x1C

/* DMA CS Control and Status bits */
#define BCM2835_DMA_ACTIVE	BIT(0)
#define BCM2835_DMA_INT		BIT(2)
#define BCM2835_DMA_ISPAUSED	BIT(4)  /* Pause requested or not active */
#define BCM2835_DMA_ISHELD	BIT(5)  /* Is held by DREQ flow control */
#define BCM2835_DMA_ERR		BIT(8)
#define BCM2835_DMA_ABORT	BIT(30) /* Stop current CB, go to next, WO */
#define BCM2835_DMA_RESET	BIT(31) /* WO, self clearing */

#define BCM2835_DMA_INT_EN	BIT(0)
#define BCM2835_DMA_D_INC	BIT(4)
#define BCM2835_DMA_D_DREQ	BIT(6)
#define BCM2835_DMA_S_INC	BIT(8)
#define BCM2835_DMA_S_DREQ	BIT(10)

#define BCM2835_DMA_PER_MAP(x)	((x) << 16)

#define BCM2835_DMA_DATA_TYPE_S8	1
#define BCM2835_DMA_DATA_TYPE_S16	2
#define BCM2835_DMA_DATA_TYPE_S32	4
#define BCM2835_DMA_DATA_TYPE_S128	16

#define BCM2835_DMA_BULK_MASK	BIT(0)
#define BCM2835_DMA_FIQ_MASK	(BIT(2) | BIT(3))

/* Valid only for channels 0 - 14; channel 15 has its own base address */
#define BCM2835_DMA_CHAN(n)	((n) << 8) /* Base address */
#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))

static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
{
	return container_of(d, struct bcm2835_dmadev, ddev);
}

static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct bcm2835_chan, vc.chan);
}

static inline struct bcm2835_desc *to_bcm2835_dma_desc(
		struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct bcm2835_desc, vd.tx);
}

static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
{
	struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
	int i;

	for (i = 0; i < desc->frames; i++)
		dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
			      desc->cb_list[i].paddr);

	kfree(desc->cb_list);
	kfree(desc);
}

static int bcm2835_dma_abort(void __iomem *chan_base)
{
	unsigned long cs;
	long int timeout = 10000;

	cs = readl(chan_base + BCM2835_DMA_CS);
	if (!(cs & BCM2835_DMA_ACTIVE))
		return 0;

	/* Write 0 to the active bit - Pause the DMA */
	writel(0, chan_base + BCM2835_DMA_CS);

	/* Wait for any current AXI transfer to complete */
	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
		cpu_relax();
		cs = readl(chan_base + BCM2835_DMA_CS);
	}

	/* We'll un-pause when we set off our next DMA */
	if (!timeout)
		return -ETIMEDOUT;

	if (!(cs & BCM2835_DMA_ACTIVE))
		return 0;

	/* Terminate the control block chain */
	writel(0, chan_base + BCM2835_DMA_NEXTCB);

	/* Abort the whole DMA */
	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
	       chan_base + BCM2835_DMA_CS);

	return 0;
}

static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct bcm2835_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_bcm2835_dma_desc(&vd->tx);

	writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
}

static irqreturn_t bcm2835_dma_callback(int irq, void *data)
{
	struct bcm2835_chan *c = data;
	struct bcm2835_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Acknowledge interrupt */
	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);

	d = c->desc;

	if (d) {
		/* TODO Only works for cyclic DMA */
		vchan_cyclic_callback(&d->vd);
	}

	/* Keep the DMA engine running */
	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct device *dev = c->vc.chan.device->dev;

	dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);

	c->cb_pool = dma_pool_create(dev_name(dev), dev,
				     sizeof(struct bcm2835_dma_cb), 0, 0);
	if (!c->cb_pool) {
		dev_err(dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return request_irq(c->irq_number,
			   bcm2835_dma_callback, 0, "DMA IRQ", c);
}

static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	free_irq(c->irq_number, c);
	dma_pool_destroy(c->cb_pool);

	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
}

static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
{
	return d->size;
}

static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
{
	unsigned int i;
	size_t size;

	for (size = i = 0; i < d->frames; i++) {
		struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
		size_t this_size = control_block->length;
		dma_addr_t dma;

		if (d->dir == DMA_DEV_TO_MEM)
			dma = control_block->dst;
		else
			dma = control_block->src;

		if (size)
			size += this_size;
		else if (addr >= dma && addr < dma + this_size)
			size += dma + this_size - addr;
	}

	return size;
}

static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue =
			bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct bcm2835_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
		else
			pos = 0;

		txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void bcm2835_dma_issue_pending(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;

	c->cyclic = true; /* Nothing else is implemented */

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		bcm2835_dma_start_desc(c);

	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct bcm2835_desc *d;
	dma_addr_t dev_addr;
	unsigned int es, sync_type;
	unsigned int frame;
	int i;

	/* Grab configuration */
	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		sync_type = BCM2835_DMA_S_DREQ;
	} else {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		sync_type = BCM2835_DMA_D_DREQ;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = BCM2835_DMA_DATA_TYPE_S32;
		break;
	default:
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->c = c;
	d->dir = direction;
	d->frames = buf_len / period_len;

	d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
	if (!d->cb_list) {
		kfree(d);
		return NULL;
	}
	/* Allocate memory for control blocks */
	for (i = 0; i < d->frames; i++) {
		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];

		cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
					       &cb_entry->paddr);
		if (!cb_entry->cb)
			goto error_cb;
	}

	/*
	 * Iterate over all frames, create a control block
	 * for each frame and link them together.
	 */
	for (frame = 0; frame < d->frames; frame++) {
		struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;

		/* Set up addresses */
		if (d->dir == DMA_DEV_TO_MEM) {
			control_block->info = BCM2835_DMA_D_INC;
			control_block->src = dev_addr;
			control_block->dst = buf_addr + frame * period_len;
		} else {
			control_block->info = BCM2835_DMA_S_INC;
			control_block->src = buf_addr + frame * period_len;
			control_block->dst = dev_addr;
		}

		/* Enable interrupt */
		control_block->info |= BCM2835_DMA_INT_EN;

		/* Set up synchronization */
		if (sync_type != 0)
			control_block->info |= sync_type;

		/* Set up DREQ channel */
		if (c->dreq != 0)
			control_block->info |=
				BCM2835_DMA_PER_MAP(c->dreq);

		/* Length of a frame */
		control_block->length = period_len;
		d->size += control_block->length;

		/*
		 * Next block is the next frame.
		 * This DMA engine driver currently only supports cyclic DMA.
		 * Therefore, wrap around at the number of frames.
		 */
		control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
error_cb:
	i--;
	for (; i >= 0; i--) {
		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];

		dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
	}

	kfree(d->cb_list);
	kfree(d);
	return NULL;
}

static int bcm2835_dma_slave_config(struct dma_chan *chan,
		struct dma_slave_config *cfg)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	if ((cfg->direction == DMA_DEV_TO_MEM &&
	     cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    (cfg->direction == DMA_MEM_TO_DEV &&
	     cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    !is_slave_direction(cfg->direction)) {
		return -EINVAL;
	}

	c->cfg = *cfg;

	return 0;
}

static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
	unsigned long flags;
	int timeout = 10000;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after bcm_dma_abort() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		bcm2835_dma_desc_free(&c->desc->vd);
		c->desc = NULL;
		bcm2835_dma_abort(c->chan_base);

		/* Wait for stopping */
		while (--timeout) {
			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
						BCM2835_DMA_ACTIVE))
				break;

			cpu_relax();
		}

		if (!timeout)
			dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
{
	struct bcm2835_chan *c;

	c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->vc.desc_free = bcm2835_dma_desc_free;
	vchan_init(&c->vc, &d->ddev);
	INIT_LIST_HEAD(&c->node);

	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
	c->ch = chan_id;
	c->irq_number = irq;

	return 0;
}

static void bcm2835_dma_free(struct bcm2835_dmadev *od)
{
	struct bcm2835_chan *c, *next;

	list_for_each_entry_safe(c, next, &od->ddev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static const struct of_device_id bcm2835_dma_of_match[] = {
	{ .compatible = "brcm,bcm2835-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);

static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
					  struct of_dma *ofdma)
{
	struct bcm2835_dmadev *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->ddev);
	if (!chan)
		return NULL;

	/* Set DREQ from param */
	to_bcm2835_dma_chan(chan)->dreq = spec->args[0];

	return chan;
}

static int bcm2835_dma_probe(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od;
	struct resource *res;
	void __iomem *base;
	int rc;
	int i;
	int irq;
	uint32_t chans_available;

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	pdev->dev.dma_parms = &od->dma_parms;
	dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	od->base = base;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
	od->ddev.device_tx_status = bcm2835_dma_tx_status;
	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
	od->ddev.device_config = bcm2835_dma_slave_config;
	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	spin_lock_init(&od->lock);

	platform_set_drvdata(pdev, od);

	/* Request DMA channel mask from device tree */
	if (of_property_read_u32(pdev->dev.of_node,
				 "brcm,dma-channel-mask",
				 &chans_available)) {
		dev_err(&pdev->dev, "Failed to get channel mask\n");
		rc = -EINVAL;
		goto err_no_dma;
	}

	/*
	 * Do not use the FIQ and BULK channels,
	 * because they are used by the GPU.
	 */
	chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);

	for (i = 0; i < pdev->num_resources; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			break;

		if (chans_available & (1 << i)) {
			rc = bcm2835_dma_chan_init(od, i, irq);
			if (rc)
				goto err_no_dma;
		}
	}

	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(pdev->dev.of_node,
					bcm2835_dma_xlate, od);
	if (rc) {
		dev_err(&pdev->dev, "Failed to register DMA controller\n");
		goto err_no_dma;
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to register slave DMA engine device: %d\n", rc);
		goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");

	return 0;

err_no_dma:
	bcm2835_dma_free(od);
	return rc;
}

static int bcm2835_dma_remove(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	bcm2835_dma_free(od);

	return 0;
}

static struct platform_driver bcm2835_dma_driver = {
	.probe	= bcm2835_dma_probe,
	.remove	= bcm2835_dma_remove,
	.driver = {
		.name = "bcm2835-dma",
		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
	},
};

module_platform_driver(bcm2835_dma_driver);

MODULE_ALIAS("platform:bcm2835-dma");
MODULE_DESCRIPTION("BCM2835 DMA engine driver");
MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
MODULE_LICENSE("GPL v2");