/*
 * Copyright (c) 2013 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"k3-dma"
#define DMA_MAX_SIZE		0x1ffc

#define INT_STAT		0x00
#define INT_TC1			0x04
#define INT_ERR1		0x0c
#define INT_ERR2		0x10
#define INT_TC1_MASK		0x18
#define INT_ERR1_MASK		0x20
#define INT_ERR2_MASK		0x24
#define INT_TC1_RAW		0x600
#define INT_ERR1_RAW		0x608
#define INT_ERR2_RAW		0x610
#define CH_PRI			0x688
#define CH_STAT			0x690
#define CX_CUR_CNT		0x704
#define CX_LLI			0x800
#define CX_CNT			0x810
#define CX_SRC			0x814
#define CX_DST			0x818
#define CX_CFG			0x81c
#define AXI_CFG			0x820
#define AXI_CFG_DEFAULT		0x201201

#define CX_LLI_CHAIN_EN		0x2
#define CX_CFG_EN		0x1
#define CX_CFG_MEM2PER		(0x1 << 2)
#define CX_CFG_PER2MEM		(0x2 << 2)
#define CX_CFG_SRCINCR		(0x1 << 31)
#define CX_CFG_DSTINCR		(0x1 << 30)

struct k3_desc_hw {
	u32 lli;
	u32 reserved[3];
	u32 count;
	u32 saddr;
	u32 daddr;
	u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
	struct virt_dma_desc	vd;
	dma_addr_t		desc_hw_lli;
	size_t			desc_num;
	size_t			size;
	struct k3_desc_hw	desc_hw[0];
};

struct k3_dma_phy;

struct k3_dma_chan {
	u32			ccfg;
	struct virt_dma_chan	vc;
	struct k3_dma_phy	*phy;
	struct list_head	node;
	enum dma_transfer_direction dir;
	dma_addr_t		dev_addr;
	enum dma_status		status;
};

struct k3_dma_phy {
	u32			idx;
	void __iomem		*base;
	struct k3_dma_chan	*vchan;
	struct k3_dma_desc_sw	*ds_run;
	struct k3_dma_desc_sw	*ds_done;
};

struct k3_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct tasklet_struct	task;
	spinlock_t		lock;
	struct list_head	chan_pending;
	struct k3_dma_phy	*phy;
	struct k3_dma_chan	*chans;
	struct clk		*clk;
	u32			dma_channels;
	u32			dma_requests;
	unsigned int		irq;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
	return container_of(chan, struct k3_dma_chan, vc.chan);
}

static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
	u32 val = 0;

	if (on) {
		val = readl_relaxed(phy->base + CX_CFG);
		val |= CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	} else {
		val = readl_relaxed(phy->base + CX_CFG);
		val &= ~CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	}
}

static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
	u32 val = 0;

	k3_dma_pause_dma(phy, false);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + INT_TC1_RAW);
	writel_relaxed(val, d->base + INT_ERR1_RAW);
	writel_relaxed(val, d->base + INT_ERR2_RAW);
}

static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
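	/*
	 * Load one hardware descriptor into the physical channel
	 * registers.  CX_CFG is deliberately written last: the config
	 * word built by the callers carries CX_CFG_EN, so the final
	 * write is what actually starts the transfer once the LLI,
	 * count and address registers are loaded.
	 */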
	writel_relaxed(hw->lli, phy->base + CX_LLI);
	writel_relaxed(hw->count, phy->base + CX_CNT);
	writel_relaxed(hw->saddr, phy->base + CX_SRC);
	writel_relaxed(hw->daddr, phy->base + CX_DST);
	writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
	writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
	u32 cnt = 0;

	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
	cnt &= 0xffff;
	return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
	return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
	return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
	if (on) {
		/* set same priority */
		writel_relaxed(0x0, d->base + CH_PRI);

		/* unmask irq */
		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
	} else {
		/* mask irq */
		writel_relaxed(0x0, d->base + INT_TC1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
	}
}

static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c;
	u32 stat = readl_relaxed(d->base + INT_STAT);
	u32 tc1 = readl_relaxed(d->base + INT_TC1);
	u32 err1 = readl_relaxed(d->base + INT_ERR1);
	u32 err2 = readl_relaxed(d->base + INT_ERR2);
	u32 i, irq_chan = 0;

	while (stat) {
		i = __ffs(stat);
		stat &= (stat - 1);
		if (likely(tc1 & BIT(i))) {
			p = &d->phy[i];
			c = p->vchan;
			if (c) {
				unsigned long flags;

				spin_lock_irqsave(&c->vc.lock, flags);
				vchan_cookie_complete(&p->ds_run->vd);
				p->ds_done = p->ds_run;
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
			irq_chan |= BIT(i);
		}
		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
			dev_warn(d->slave.dev, "DMA ERR\n");
	}

	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
	writel_relaxed(err1, d->base + INT_ERR1_RAW);
	writel_relaxed(err2, d->base + INT_ERR2_RAW);

	if (irq_chan) {
		tasklet_schedule(&d->task);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int k3_dma_start_txd(struct k3_dma_chan *c)
{
	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
		return -EAGAIN;

	if (vd) {
		struct k3_dma_desc_sw *ds =
			container_of(vd, struct k3_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);
		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
		return 0;
	}
	c->phy->ds_done = NULL;
	c->phy->ds_run = NULL;
	return -EAGAIN;
}
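/*
 * The tasklet is a simple two-pass scheduler mapping virtual channels
 * (one per request line) onto the smaller pool of physical channels:
 *
 *   1) Recycle: for every vchan whose pchan finished its descriptor
 *      (ds_done set by the irq handler), try to start the next issued
 *      descriptor; if there is none, detach the pchan for reuse.
 *   2) Allocate: hand each idle pchan to the first vchan waiting on
 *      d->chan_pending, then kick off its first descriptor.
 *
 * Allocation happens under d->lock; actually starting a descriptor
 * only takes the owning vchan's lock.
 */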
static void k3_dma_tasklet(unsigned long arg)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && p->ds_done) {
			if (k3_dma_start_txd(c)) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irq(&d->lock);
	for (pch = 0; pch < d->dma_channels; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
					     struct k3_dma_chan, node);
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << pch;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n",
				pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irq(&c->vc.lock);
				k3_dma_start_txd(c);
				spin_unlock_irq(&c->vc.lock);
			}
		}
	}
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}
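/*
 * Residue reporting: a descriptor still sitting on the issue queue has
 * its whole size outstanding.  For the descriptor currently in flight,
 * the remainder is the hardware byte counter of the active LLI entry
 * (CX_CUR_CNT) plus the counts of all LLI entries not yet fetched,
 * found by walking from the entry CX_LLI points at to the end of the
 * chain.
 */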
static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct k3_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = k3_dma_get_curr_cnt(d, p);
		clli = k3_dma_get_curr_lli(p);
		index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].count;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

static void k3_dma_issue_pending(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy) {
			if (list_empty(&c->node)) {
				/* if new channel, add chan_pending */
				list_add_tail(&c->node, &d->chan_pending);
				/* check in tasklet */
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
		}
		spin_unlock(&d->lock);
	} else {
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
	dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if ((num + 1) < ds->desc_num)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct k3_desc_hw);
	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
	ds->desc_hw[num].count = len;
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].config = ccfg;
}

static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
	ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
	if (!ds)
		return NULL;

	ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
	ds->size = len;
	ds->desc_num = num;
	num = 0;

	if (!c->ccfg) {
		/* default is memtomem, without calling device_config */
		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
	}

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		if (c->dir == DMA_MEM_TO_DEV) {
			src += copy;
		} else if (c->dir == DMA_DEV_TO_MEM) {
			dst += copy;
		} else {
			src += copy;
			dst += copy;
		}
		len -= copy;
	} while (len);

	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}
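/*
 * A scatterlist entry may exceed the 0x1ffc-byte limit of a single
 * hardware descriptor (DMA_MAX_SIZE), so the first pass below counts
 * how many LLI entries are really needed, and the second pass splits
 * every segment into DMA_MAX_SIZE chunks, keeping the device-side
 * address fixed at c->dev_addr.
 */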
static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (sgl == NULL)
		return NULL;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
	if (!ds)
		return NULL;

	ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
	ds->desc_num = num;
	num = 0;

	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static int k3_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	u32 maxburst = 0, val = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (cfg == NULL)
		return -EINVAL;
	c->dir = cfg->direction;
	if (c->dir == DMA_DEV_TO_MEM) {
		c->ccfg = CX_CFG_DSTINCR;
		c->dev_addr = cfg->src_addr;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
	} else if (c->dir == DMA_MEM_TO_DEV) {
		c->ccfg = CX_CFG_SRCINCR;
		c->dev_addr = cfg->dst_addr;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
	}
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		val = __ffs(width);
		break;
	default:
		val = 3;
		break;
	}
	c->ccfg |= (val << 12) | (val << 16);

	/*
	 * The burst fields are 4 bits wide and encode burst length as
	 * "beats - 1" (see the 0xf == burst-of-16 default in
	 * k3_dma_prep_memcpy), so clamp to 15 rather than 16, which
	 * would overflow into the neighbouring field.
	 */
	if ((maxburst == 0) || (maxburst > 16))
		val = 15;
	else
		val = maxburst - 1;
	c->ccfg |= (val << 20) | (val << 24);

	/* flow control must match the transfer direction */
	if (c->dir == DMA_DEV_TO_MEM)
		c->ccfg |= CX_CFG_PER2MEM | CX_CFG_EN;
	else if (c->dir == DMA_MEM_TO_DEV)
		c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

	/* specific request line */
	c->ccfg |= c->vc.chan.chan_id << 4;

	return 0;
}

static int k3_dma_terminate_all(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		k3_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		p->ds_run = p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
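/*
 * Pause/resume semantics: a vchan that currently owns a pchan is
 * paused by clearing CX_CFG_EN on the hardware; a vchan still waiting
 * for a pchan is simply removed from (and, on resume, re-added to)
 * the d->chan_pending list so the tasklet will not schedule it.
 */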
static int k3_dma_transfer_pause(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;
		if (p) {
			k3_dma_pause_dma(p, false);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}

	return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;
		if (p) {
			k3_dma_pause_dma(p, true);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
	struct k3_dma_desc_sw *ds =
		container_of(vd, struct k3_dma_desc_sw, vd);

	kfree(ds);
}

static const struct of_device_id k3_pdma_dt_ids[] = {
	{ .compatible = "hisilicon,k3-dma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	struct k3_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	/* d->chans[] has d->dma_requests entries, so reject request ==
	 * d->dma_requests as well (was an off-by-one: "request >")
	 */
	if (request >= d->dma_requests)
		return NULL;

	return dma_get_slave_channel(&(d->chans[request].vc.chan));
}
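/*
 * Illustrative device-tree node for this controller; the values are
 * examples only, consult the SoC dtsi and the k3dma binding document
 * for real ones:
 *
 *	dma0: dma@fcd02000 {
 *		compatible = "hisilicon,k3-dma-1.0";
 *		reg = <0xfcd02000 0x1000>;
 *		#dma-cells = <1>;
 *		dma-channels = <16>;
 *		dma-requests = <27>;
 *		interrupts = <0 12 4>;
 *		clocks = <&pclk>;
 *	};
 *
 * probe() maps "reg", reads "dma-channels"/"dma-requests", grabs the
 * clock and IRQ, then registers both the dmaengine device and the OF
 * translation hook above (the single dma cell is the request line).
 */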
static int k3_dma_probe(struct platform_device *op)
{
	struct k3_dma_dev *d;
	const struct of_device_id *of_id;
	struct resource *iores;
	int i, ret, irq = 0;

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32(op->dev.of_node,
				     "dma-channels", &d->dma_channels);
		of_property_read_u32(op->dev.of_node,
				     "dma-requests", &d->dma_requests);
	}

	d->clk = devm_clk_get(&op->dev, NULL);
	if (IS_ERR(d->clk)) {
		dev_err(&op->dev, "no dma clk\n");
		return PTR_ERR(d->clk);
	}

	irq = platform_get_irq(op, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&op->dev, irq,
			       k3_dma_int_handler, 0, DRIVER_NAME, d);
	if (ret)
		return ret;

	d->irq = irq;

	/* init phy channel */
	d->phy = devm_kzalloc(&op->dev,
		d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
	if (d->phy == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct k3_dma_phy *p = &d->phy[i];

		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
	d->slave.device_tx_status = k3_dma_tx_status;
	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
	d->slave.device_issue_pending = k3_dma_issue_pending;
	d->slave.device_config = k3_dma_config;
	d->slave.device_pause = k3_dma_transfer_pause;
	d->slave.device_resume = k3_dma_transfer_resume;
	d->slave.device_terminate_all = k3_dma_terminate_all;
	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

	/* init virtual channel */
	d->chans = devm_kzalloc(&op->dev,
		d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
	if (d->chans == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct k3_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = k3_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	k3_dma_enable_dma(d, true);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto dma_async_register_fail;

	ret = of_dma_controller_register(op->dev.of_node,
					 k3_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
	platform_set_drvdata(op, d);
	dev_info(&op->dev, "initialized\n");

	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
dma_async_register_fail:
	clk_disable_unprepare(d->clk);
	return ret;
}

static int k3_dma_remove(struct platform_device *op)
{
	struct k3_dma_chan *c, *cn;
	struct k3_dma_dev *d = platform_get_drvdata(op);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free(op->dev.of_node);

	devm_free_irq(&op->dev, d->irq, d);

	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
	tasklet_kill(&d->task);
	clk_disable_unprepare(d->clk);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = k3_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			 "chan 0x%x is running, unable to suspend\n", stat);
		return -EBUSY;
	}
	k3_dma_enable_dma(d, false);
	clk_disable_unprepare(d->clk);
	return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	k3_dma_enable_dma(d, true);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &k3_dma_pmops,
		.of_match_table = k3_pdma_dt_ids,
	},
	.probe		= k3_dma_probe,
	.remove		= k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");
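/*
 * Illustrative consumer-side sketch (a hypothetical peripheral driver)
 * showing how this controller is reached through the generic dmaengine
 * slave API; "rx" and fifo_phys are placeholders, not part of this
 * driver:
 *
 *	struct dma_chan *ch = dma_request_slave_channel(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 16,
 *	};
 *
 *	dmaengine_slave_config(ch, &cfg);
 *	desc = dmaengine_prep_slave_sg(ch, sgl, nents, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(ch);
 */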