/*
 * Copyright (c) 2013 - 2015 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"k3-dma"
#define DMA_MAX_SIZE		0x1ffc
#define DMA_CYCLIC_MAX_PERIOD	0x1000
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

#define INT_STAT		0x00
#define INT_TC1			0x04
#define INT_TC2			0x08
#define INT_ERR1		0x0c
#define INT_ERR2		0x10
#define INT_TC1_MASK		0x18
#define INT_TC2_MASK		0x1c
#define INT_ERR1_MASK		0x20
#define INT_ERR2_MASK		0x24
#define INT_TC1_RAW		0x600
#define INT_TC2_RAW		0x608
#define INT_ERR1_RAW		0x610
#define INT_ERR2_RAW		0x618
#define CH_PRI			0x688
#define CH_STAT			0x690
#define CX_CUR_CNT		0x704
#define CX_LLI			0x800
#define CX_CNT1			0x80c
#define CX_CNT0			0x810
#define CX_SRC			0x814
#define CX_DST			0x818
#define CX_CFG			0x81c
#define AXI_CFG			0x820
#define AXI_CFG_DEFAULT		0x201201

#define CX_LLI_CHAIN_EN		0x2
#define CX_CFG_EN		0x1
#define CX_CFG_NODEIRQ		BIT(1)
#define CX_CFG_MEM2PER		(0x1 << 2)
#define CX_CFG_PER2MEM		(0x2 << 2)
#define CX_CFG_SRCINCR		(0x1 << 31)
#define CX_CFG_DSTINCR		(0x1 << 30)

struct k3_desc_hw {
	u32 lli;
	u32 reserved[3];
	u32 count;
	u32 saddr;
	u32 daddr;
	u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
	struct virt_dma_desc vd;
	dma_addr_t desc_hw_lli;
	size_t desc_num;
	size_t size;
	struct k3_desc_hw *desc_hw;
};

struct k3_dma_phy;

struct k3_dma_chan {
	u32 ccfg;
	struct virt_dma_chan vc;
	struct k3_dma_phy *phy;
	struct list_head node;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;
	enum dma_status status;
	bool cyclic;
};

struct k3_dma_phy {
	u32 idx;
	void __iomem *base;
	struct k3_dma_chan *vchan;
	struct k3_dma_desc_sw *ds_run;
	struct k3_dma_desc_sw *ds_done;
};

struct k3_dma_dev {
	struct dma_device slave;
	void __iomem *base;
	struct tasklet_struct task;
	spinlock_t lock;
	struct list_head chan_pending;
	struct k3_dma_phy *phy;
	struct k3_dma_chan *chans;
	struct clk *clk;
	struct dma_pool *pool;
	u32 dma_channels;
	u32 dma_requests;
	unsigned int irq;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
	return container_of(chan, struct k3_dma_chan, vc.chan);
}

static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
	u32 val = 0;

	if (on) {
		val = readl_relaxed(phy->base + CX_CFG);
		val |= CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	} else {
		val = readl_relaxed(phy->base + CX_CFG);
		val &= ~CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	}
}
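/*
 * Disable the physical channel and acknowledge any raw TC/ERR status
 * still latched for it, so a later reuse starts from a clean state.
 */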
static void k3_dma_terminate_chan(struct k3_dma_phy *phy,
				  struct k3_dma_dev *d)
{
	u32 val = 0;

	k3_dma_pause_dma(phy, false);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + INT_TC1_RAW);
	writel_relaxed(val, d->base + INT_TC2_RAW);
	writel_relaxed(val, d->base + INT_ERR1_RAW);
	writel_relaxed(val, d->base + INT_ERR2_RAW);
}

static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
	writel_relaxed(hw->lli, phy->base + CX_LLI);
	writel_relaxed(hw->count, phy->base + CX_CNT0);
	writel_relaxed(hw->saddr, phy->base + CX_SRC);
	writel_relaxed(hw->daddr, phy->base + CX_DST);
	writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
	writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
	u32 cnt = 0;

	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
	cnt &= 0xffff;
	return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
	return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
	return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
	if (on) {
		/* set same priority */
		writel_relaxed(0x0, d->base + CH_PRI);

		/* unmask irq */
		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
		writel_relaxed(0xffff, d->base + INT_TC2_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
	} else {
		/* mask irq */
		writel_relaxed(0x0, d->base + INT_TC1_MASK);
		writel_relaxed(0x0, d->base + INT_TC2_MASK);
		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
	}
}

static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c;
	u32 stat = readl_relaxed(d->base + INT_STAT);
	u32 tc1 = readl_relaxed(d->base + INT_TC1);
	u32 tc2 = readl_relaxed(d->base + INT_TC2);
	u32 err1 = readl_relaxed(d->base + INT_ERR1);
	u32 err2 = readl_relaxed(d->base + INT_ERR2);
	u32 i, irq_chan = 0;

	while (stat) {
		i = __ffs(stat);
		stat &= ~BIT(i);
		if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
			unsigned long flags;

			p = &d->phy[i];
			c = p->vchan;
			if (c && (tc1 & BIT(i))) {
				spin_lock_irqsave(&c->vc.lock, flags);
				vchan_cookie_complete(&p->ds_run->vd);
				WARN_ON_ONCE(p->ds_done);
				p->ds_done = p->ds_run;
				p->ds_run = NULL;
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
			if (c && (tc2 & BIT(i))) {
				spin_lock_irqsave(&c->vc.lock, flags);
				if (p->ds_run != NULL)
					vchan_cyclic_callback(&p->ds_run->vd);
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
			irq_chan |= BIT(i);
		}
		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
			dev_warn(d->slave.dev, "DMA ERR\n");
	}

	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
	writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
	writel_relaxed(err1, d->base + INT_ERR1_RAW);
	writel_relaxed(err2, d->base + INT_ERR2_RAW);

	if (irq_chan)
		tasklet_schedule(&d->task);

	if (irq_chan || err1 || err2)
		return IRQ_HANDLED;

	return IRQ_NONE;
}
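/*
 * Fetch the next issued descriptor for this virtual channel and program
 * it onto the bound physical channel.  Returns -EAGAIN if no pchan is
 * bound, the pchan is still busy, or nothing has been issued.
 */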
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
		return -EAGAIN;

	if (vd) {
		struct k3_dma_desc_sw *ds =
			container_of(vd, struct k3_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);

		WARN_ON_ONCE(c->phy->ds_run);
		WARN_ON_ONCE(c->phy->ds_done);
		c->phy->ds_run = ds;
		/* start dma */
		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
		return 0;
	}
	return -EAGAIN;
}

static void k3_dma_tasklet(unsigned long arg)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && p->ds_done) {
			if (k3_dma_start_txd(c)) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irq(&d->lock);
	for (pch = 0; pch < d->dma_channels; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
					     struct k3_dma_chan, node);
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << pch;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irq(&c->vc.lock);
				k3_dma_start_txd(c);
				spin_unlock_irq(&c->vc.lock);
			}
		}
	}
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}
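/*
 * Residue reporting: a descriptor still sitting on the issued list counts
 * in full; for the descriptor currently running, add the hardware's current
 * transfer count to the sizes of the LLIs that have not been processed yet.
 */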
static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd && !c->cyclic) {
		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct k3_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = k3_dma_get_curr_cnt(d, p);
		clli = k3_dma_get_curr_lli(p);
		index = ((clli - ds->desc_hw_lli) /
				sizeof(struct k3_desc_hw)) + 1;
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].count;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

static void k3_dma_issue_pending(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy) {
			if (list_empty(&c->node)) {
				/* if new channel, add chan_pending */
				list_add_tail(&c->node, &d->chan_pending);
				/* check in tasklet */
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
			dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if (num != ds->desc_num - 1)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct k3_desc_hw);

	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
	ds->desc_hw[num].count = len;
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].config = ccfg;
}

static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
							 struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num);
	ds->desc_num = num;
	return ds;
}
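/*
 * A single LLI moves at most DMA_MAX_SIZE bytes, so memcpy, slave_sg and
 * cyclic requests are all split into chains of LLIs allocated from the
 * per-device dma_pool above.
 */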
static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	ds->size = len;
	num = 0;

	if (!c->ccfg) {
		/* default is memtomem, without calling device_config */
		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
	}

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		if (c->dir == DMA_MEM_TO_DEV) {
			src += copy;
		} else if (c->dir == DMA_DEV_TO_MEM) {
			dst += copy;
		} else {
			src += copy;
			dst += copy;
		}
		len -= copy;
	} while (len);

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (sgl == NULL)
		return NULL;

	c->cyclic = 0;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;
	num = 0;

	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *
k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
		       size_t buf_len, size_t period_len,
		       enum dma_transfer_direction dir,
		       unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	dma_addr_t addr, src = 0, dst = 0;
	int num = 1, since = 0;
	size_t modulo = DMA_CYCLIC_MAX_PERIOD;
	u32 en_tc2 = 0;

	dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
		__func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
		buf_len, period_len, (int)dir);

	avail = buf_len;
	if (avail > modulo)
		num += DIV_ROUND_UP(avail, modulo) - 1;

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 1;
	addr = buf_addr;
	avail = buf_len;
	total = avail;
	num = 0;

	if (period_len < modulo)
		modulo = period_len;

	do {
		len = min_t(size_t, avail, modulo);

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = addr;
		}
		since += len;
		if (since >= period_len) {
			/* descriptor asks for TC2 interrupt on completion */
			en_tc2 = CX_CFG_NODEIRQ;
			since -= period_len;
		} else
			en_tc2 = 0;

		k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);

		addr += len;
		avail -= len;
	} while (avail);

	/* "Cyclic" == end of link points back to start of link */
	ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;

	ds->size = total;

	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}
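/*
 * Translate a dma_slave_config into the channel configuration word cached
 * in c->ccfg: source/destination bus width goes into bits 12-19, burst
 * length into bits 20-27, and the request line (the channel id) starts at
 * bit 4.
 */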
static int k3_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	u32 maxburst = 0, val = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (cfg == NULL)
		return -EINVAL;
	c->dir = cfg->direction;
	if (c->dir == DMA_DEV_TO_MEM) {
		c->ccfg = CX_CFG_DSTINCR;
		c->dev_addr = cfg->src_addr;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
	} else if (c->dir == DMA_MEM_TO_DEV) {
		c->ccfg = CX_CFG_SRCINCR;
		c->dev_addr = cfg->dst_addr;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
	}
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		val = __ffs(width);
		break;
	default:
		val = 3;
		break;
	}
	c->ccfg |= (val << 12) | (val << 16);

	if ((maxburst == 0) || (maxburst > 16))
		val = 15;
	else
		val = maxburst - 1;
	c->ccfg |= (val << 20) | (val << 24);
	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

	/* specific request line */
	c->ccfg |= c->vc.chan.chan_id << 4;

	return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
	struct k3_dma_desc_sw *ds =
		container_of(vd, struct k3_dma_desc_sw, vd);
	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

static int k3_dma_terminate_all(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		k3_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		if (p->ds_run) {
			k3_dma_free_desc(&p->ds_run->vd);
			p->ds_run = NULL;
		}
		if (p->ds_done) {
			k3_dma_free_desc(&p->ds_done->vd);
			p->ds_done = NULL;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;
		if (p) {
			k3_dma_pause_dma(p, false);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}

	return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;
		if (p) {
			k3_dma_pause_dma(p, true);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}
static const struct of_device_id k3_pdma_dt_ids[] = {
	{ .compatible = "hisilicon,k3-dma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	struct k3_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request > d->dma_requests)
		return NULL;

	return dma_get_slave_channel(&(d->chans[request].vc.chan));
}
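/*
 * Probe: map the controller registers, read "dma-channels" and
 * "dma-requests" from the device tree, create the LLI dma_pool, set up
 * the physical and virtual channels, then register with dmaengine and
 * the OF DMA translation helpers.
 */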
static int k3_dma_probe(struct platform_device *op)
{
	struct k3_dma_dev *d;
	const struct of_device_id *of_id;
	struct resource *iores;
	int i, ret, irq = 0;

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32((&op->dev)->of_node,
				"dma-channels", &d->dma_channels);
		of_property_read_u32((&op->dev)->of_node,
				"dma-requests", &d->dma_requests);
	}

	d->clk = devm_clk_get(&op->dev, NULL);
	if (IS_ERR(d->clk)) {
		dev_err(&op->dev, "no dma clk\n");
		return PTR_ERR(d->clk);
	}

	irq = platform_get_irq(op, 0);
	ret = devm_request_irq(&op->dev, irq,
			k3_dma_int_handler, 0, DRIVER_NAME, d);
	if (ret)
		return ret;

	d->irq = irq;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
					LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kzalloc(&op->dev,
		d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
	if (d->phy == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct k3_dma_phy *p = &d->phy[i];

		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
	d->slave.device_tx_status = k3_dma_tx_status;
	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
	d->slave.device_issue_pending = k3_dma_issue_pending;
	d->slave.device_config = k3_dma_config;
	d->slave.device_pause = k3_dma_transfer_pause;
	d->slave.device_resume = k3_dma_transfer_resume;
	d->slave.device_terminate_all = k3_dma_terminate_all;
	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

	/* init virtual channel */
	d->chans = devm_kzalloc(&op->dev,
		d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
	if (d->chans == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct k3_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = k3_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	k3_dma_enable_dma(d, true);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto dma_async_register_fail;

	ret = of_dma_controller_register((&op->dev)->of_node,
					k3_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
	platform_set_drvdata(op, d);
	dev_info(&op->dev, "initialized\n");

	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
dma_async_register_fail:
	clk_disable_unprepare(d->clk);
	return ret;
}

static int k3_dma_remove(struct platform_device *op)
{
	struct k3_dma_chan *c, *cn;
	struct k3_dma_dev *d = platform_get_drvdata(op);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free((&op->dev)->of_node);

	devm_free_irq(&op->dev, d->irq, d);

	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
	tasklet_kill(&d->task);
	clk_disable_unprepare(d->clk);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = k3_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			"chan %d is running, fail to suspend\n", stat);
		return -1;
	}
	k3_dma_enable_dma(d, false);
	clk_disable_unprepare(d->clk);
	return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	k3_dma_enable_dma(d, true);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.pm = &k3_dma_pmops,
		.of_match_table = k3_pdma_dt_ids,
	},
	.probe = k3_dma_probe,
	.remove = k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");