/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2015 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected
 * to a dedicated bus which is directly connected to a data port on a
 * peripheral. Since those are configuration options of the core that are
 * selected when it is instantiated, they can not be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus only connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED		32U

struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int align_mask;

	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;

	struct device_dma_parameters dma_parms;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
		desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}
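
/*
 * Illustration of the segment splitting done by axi_dmac_fill_linear_sg()
 * below. The numbers are hypothetical and only meant as an example: assuming
 * period_len = 10000, max_length = 4096 and align_mask = 0x3,
 *
 *	num_segments = DIV_ROUND_UP(10000, 4096) = 3
 *	segment_size = DIV_ROUND_UP(10000, 3) = 3334, rounded up to the next
 *	aligned size: ((3334 - 1) | 0x3) + 1 = 3336
 *
 * so each period is emitted as three segments of 3336, 3336 and 3328 bytes.
 */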
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	/*
	 * The stride is the distance between the starts of consecutive rows,
	 * i.e. the row size plus the inter-chunk gap.
	 */
	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
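/*
 * Purely illustrative devicetree fragment for a single channel. The addresses,
 * interrupt and clock specifiers are hypothetical; see the adi,axi-dmac
 * binding document for the authoritative description. The per-channel
 * properties are the ones parsed by axi_dmac_parse_chan_dt() below:
 *
 *	dma-controller@7c420000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		reg = <0x7c420000 0x1000>;
 *		interrupts = <0 57 0>;
 *		clocks = <&clkc 16>;
 *		#dma-cells = <1>;
 *
 *		adi,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-width = <64>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,destination-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *			};
 *		};
 *	};
 */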
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;

	return 0;
}

static void axi_dmac_detect_caps(struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;

	/*
	 * Optional features are detected by writing to a register and reading
	 * the value back: bits that are not implemented in this instance of
	 * the core read back as zero.
	 */
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	/*
	 * The X_LENGTH register only implements as many bits as needed for
	 * the maximum supported transfer length, so writing all-ones and
	 * reading back yields that maximum.
	 */
	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct device_node *of_channels, *of_chan;
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	pdev->dev.dma_parms = &dmac->dma_parms;
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	axi_dmac_detect_caps(dmac);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	return 0;

err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");
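
/*
 * Illustrative consumer-side sketch (not part of this driver): how a client
 * might request the channel exposed here and queue a device-to-memory
 * transfer through the generic dmaengine API. The channel name "rx", the
 * buffer variables and the completion handler are hypothetical; only the
 * dmaengine calls themselves are real. Note that buf_dma_addr and buf_len
 * must respect the channel's bus-width alignment (see axi_dmac_check_addr()
 * and axi_dmac_check_len()).
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	desc = dmaengine_prep_slave_single(chan, buf_dma_addr, buf_len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *
 *	desc->callback = my_rx_complete;	// hypothetical completion handler
 *	desc->callback_param = my_ctx;
 *
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */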