/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2015 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces are selected
 * at configuration time. An interface can either be connected to a central
 * memory interconnect, which allows access to system memory, or to a dedicated
 * bus that is directly connected to a data port on a peripheral. Since these
 * are configuration options of the core that are selected when it is
 * instantiated, they can not be changed by software at runtime. By extension
 * this means that each channel is uni-directional: it can either be device to
 * memory or memory to device, but not both. Also, since the device side is a
 * dedicated data bus connected to a single peripheral, there is no address
 * that can or needs to be configured for the device side.
 */
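/*
 * Illustrative sketch (not part of this driver): a peripheral driver that
 * owns a device-to-memory channel of this controller would drive it through
 * the generic dmaengine API roughly as below. The channel name "rx", the
 * buffer parameters and the callback are assumptions made up for the example.
 *
 *	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
 *	struct dma_async_tx_descriptor *desc;
 *
 *	if (!chan)
 *		return -ENODEV;
 *
 *	// One cyclic buffer, split by the driver into buf_len / period_len
 *	// segments; the period callback fires after each completed segment.
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_addr, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = my_period_callback;	// assumed helper
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */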
#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)

struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int align_mask;

	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;

	struct device_dma_parameters dma_parms;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0 || len > chan->max_length)
		return false;
	if ((len & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs)
		chan->next_desc = NULL;
	else
		chan->next_desc = desc;

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback
	 * to call, enable hw cyclic mode to avoid unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
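/*
 * Completion bookkeeping, as implemented below: every segment pushed to the
 * hardware records the transfer ID read back from AXI_DMAC_REG_TRANSFER_ID.
 * On an end-of-transfer interrupt, AXI_DMAC_REG_TRANSFER_DONE is read as a
 * bitmask of finished IDs and the segments of the active descriptor are
 * retired in submission order as long as their ID bit is set. This summary is
 * derived from the code below, not from a datasheet, so treat it as a reading
 * aid rather than a spec.
 */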
static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return;

	if (active->cyclic) {
		vchan_cyclic_callback(&active->vdesc);
	} else {
		do {
			sg = &active->sg[active->num_completed];
			if (!(BIT(sg->id) & completed_transfers))
				break;
			active->num_completed++;
			if (active->num_completed == active->num_sgs) {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		} while (active);
	}
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if (pending & AXI_DMAC_IRQ_SOT)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;

	desc = kzalloc(sizeof(struct axi_dmac_desc) +
		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->num_sgs = num_sgs;

	return desc;
}
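/*
 * Worked example for the validation helpers used by the prep callbacks below
 * (numbers are hypothetical): with adi,source-bus-width = <64> and
 * adi,destination-bus-width = <64> the parsed widths are 8 bytes each, so
 * align_mask = 8 - 1 = 0x7 and every segment address and length must be a
 * multiple of 8 bytes. axi_dmac_prep_dma_cyclic() additionally requires
 * buf_len to be an exact multiple of period_len, e.g. buf_len = 4096 with
 * period_len = 1024 yields num_periods = 4 segments.
 */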
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	desc = axi_dmac_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			desc->sg[i].dest_addr = sg_dma_address(sg);
		else
			desc->sg[i].src_addr = sg_dma_address(sg);
		desc->sg[i].x_len = sg_dma_len(sg);
		desc->sg[i].y_len = 1;
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, i;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;

	desc = axi_dmac_alloc_desc(num_periods);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		if (direction == DMA_DEV_TO_MEM)
			desc->sg[i].dest_addr = buf_addr;
		else
			desc->sg[i].src_addr = buf_addr;
		desc->sg[i].x_len = period_len;
		desc->sg[i].y_len = 1;
		buf_addr += period_len;
	}

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    !axi_dmac_check_len(chan, xt->numf))
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
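/*
 * Worked example for the interleaved/2D path above (numbers are
 * hypothetical): capturing 16 lines of 1920 bytes into a frame buffer with a
 * 2048-byte line pitch means xt->numf = 16, xt->sgl[0].size = 1920 and a
 * destination ICG of 128 bytes. On hardware with adi,2d this becomes a single
 * segment with x_len = 1920, y_len = 16 and dest_stride = 1920 + 128 = 2048.
 * Without 2D support the ICG would have to be 0 and the transfer collapses to
 * x_len = 1920 * 16 with y_len = 1.
 */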
static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave. An illustrative channel
 * node is sketched after this function.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,length-width", &val);
	if (ret)
		return ret;

	if (val >= 32)
		chan->max_length = UINT_MAX;
	else
		chan->max_length = (1ULL << val) - 1;

	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;

	chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
	chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");

	return 0;
}
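/*
 * Illustrative devicetree fragment for the properties parsed above. The node
 * names, unit addresses, clock and interrupt specifiers are made up; the
 * property names, the "adi,channels" container and the compatible string are
 * the ones this driver actually looks for.
 *
 *	dma: dma-controller@7c420000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		reg = <0x7c420000 0x1000>;
 *		interrupts = <0 57 0>;
 *		clocks = <&clkc 16>;
 *		#dma-cells = <1>;
 *
 *		adi,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-width = <64>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,destination-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *				adi,length-width = <24>;
 *				adi,cyclic;
 *			};
 *		};
 *	};
 */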
static int axi_dmac_probe(struct platform_device *pdev)
{
	struct device_node *of_channels, *of_chan;
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	pdev->dev.dma_parms = &dmac->dma_parms;
	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	return 0;

err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");