/*
 * Copyright (c) 2013 - 2015 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"k3-dma"
#define DMA_MAX_SIZE		0x1ffc
#define DMA_CYCLIC_MAX_PERIOD	0x1000
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

#define INT_STAT		0x00
#define INT_TC1			0x04
#define INT_TC2			0x08
#define INT_ERR1		0x0c
#define INT_ERR2		0x10
#define INT_TC1_MASK		0x18
#define INT_TC2_MASK		0x1c
#define INT_ERR1_MASK		0x20
#define INT_ERR2_MASK		0x24
#define INT_TC1_RAW		0x600
#define INT_TC2_RAW		0x608
#define INT_ERR1_RAW		0x610
#define INT_ERR2_RAW		0x618
#define CH_PRI			0x688
#define CH_STAT			0x690
#define CX_CUR_CNT		0x704
#define CX_LLI			0x800
#define CX_CNT1			0x80c
#define CX_CNT0			0x810
#define CX_SRC			0x814
#define CX_DST			0x818
#define CX_CFG			0x81c
#define AXI_CFG			0x820
#define AXI_CFG_DEFAULT		0x201201

#define CX_LLI_CHAIN_EN		0x2
#define CX_CFG_EN		0x1
#define CX_CFG_NODEIRQ		BIT(1)
#define CX_CFG_MEM2PER		(0x1 << 2)
#define CX_CFG_PER2MEM		(0x2 << 2)
#define CX_CFG_SRCINCR		(0x1 << 31)
#define CX_CFG_DSTINCR		(0x1 << 30)

struct k3_desc_hw {
	u32 lli;
	u32 reserved[3];
	u32 count;
	u32 saddr;
	u32 daddr;
	u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
	struct virt_dma_desc	vd;
	dma_addr_t		desc_hw_lli;
	size_t			desc_num;
	size_t			size;
	struct k3_desc_hw	*desc_hw;
};

struct k3_dma_phy;

struct k3_dma_chan {
	u32			ccfg;
	struct virt_dma_chan	vc;
	struct k3_dma_phy	*phy;
	struct list_head	node;
	dma_addr_t		dev_addr;
	enum dma_status		status;
	bool			cyclic;
	struct dma_slave_config	slave_config;
};

struct k3_dma_phy {
	u32			idx;
	void __iomem		*base;
	struct k3_dma_chan	*vchan;
	struct k3_dma_desc_sw	*ds_run;
	struct k3_dma_desc_sw	*ds_done;
};

struct k3_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct tasklet_struct	task;
	spinlock_t		lock;
	struct list_head	chan_pending;
	struct k3_dma_phy	*phy;
	struct k3_dma_chan	*chans;
	struct clk		*clk;
	struct dma_pool		*pool;
	u32			dma_channels;
	u32			dma_requests;
	unsigned int		irq;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg);

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
	return container_of(chan, struct k3_dma_chan, vc.chan);
}

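/*
 * Toggle the channel-enable bit in CX_CFG: on == true sets CX_CFG_EN to
 * (re)start the physical channel, on == false clears it to pause/stop it.
 */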
static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
	u32 val = 0;

	if (on) {
		val = readl_relaxed(phy->base + CX_CFG);
		val |= CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	} else {
		val = readl_relaxed(phy->base + CX_CFG);
		val &= ~CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	}
}

static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
	u32 val = 0;

	k3_dma_pause_dma(phy, false);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + INT_TC1_RAW);
	writel_relaxed(val, d->base + INT_TC2_RAW);
	writel_relaxed(val, d->base + INT_ERR1_RAW);
	writel_relaxed(val, d->base + INT_ERR2_RAW);
}

static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
	writel_relaxed(hw->lli, phy->base + CX_LLI);
	writel_relaxed(hw->count, phy->base + CX_CNT0);
	writel_relaxed(hw->saddr, phy->base + CX_SRC);
	writel_relaxed(hw->daddr, phy->base + CX_DST);
	writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
	writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
	u32 cnt = 0;

	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
	cnt &= 0xffff;
	return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
	return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
	return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
	if (on) {
		/* set same priority */
		writel_relaxed(0x0, d->base + CH_PRI);

		/* unmask irq */
		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
		writel_relaxed(0xffff, d->base + INT_TC2_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
	} else {
		/* mask irq */
		writel_relaxed(0x0, d->base + INT_TC1_MASK);
		writel_relaxed(0x0, d->base + INT_TC2_MASK);
		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
	}
}

static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c;
	u32 stat = readl_relaxed(d->base + INT_STAT);
	u32 tc1  = readl_relaxed(d->base + INT_TC1);
	u32 tc2  = readl_relaxed(d->base + INT_TC2);
	u32 err1 = readl_relaxed(d->base + INT_ERR1);
	u32 err2 = readl_relaxed(d->base + INT_ERR2);
	u32 i, irq_chan = 0;

	while (stat) {
		i = __ffs(stat);
		stat &= ~BIT(i);
		if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
			unsigned long flags;

			p = &d->phy[i];
			c = p->vchan;
			if (c && (tc1 & BIT(i))) {
				spin_lock_irqsave(&c->vc.lock, flags);
				vchan_cookie_complete(&p->ds_run->vd);
				p->ds_done = p->ds_run;
				p->ds_run = NULL;
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
			if (c && (tc2 & BIT(i))) {
				spin_lock_irqsave(&c->vc.lock, flags);
				if (p->ds_run != NULL)
					vchan_cyclic_callback(&p->ds_run->vd);
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
			irq_chan |= BIT(i);
		}
		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
			dev_warn(d->slave.dev, "DMA ERR\n");
	}

	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
	writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
	writel_relaxed(err1, d->base + INT_ERR1_RAW);
	writel_relaxed(err2, d->base + INT_ERR2_RAW);

	if (irq_chan)
		tasklet_schedule(&d->task);

	if (irq_chan || err1 || err2)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

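/*
 * Load the next issued descriptor onto the physical channel bound to this
 * virtual channel.  Returns 0 on success, or -EAGAIN when no physical
 * channel is attached, the channel is still busy per CH_STAT, or there is
 * nothing left on vc->desc_issued.
 */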
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
		return -EAGAIN;

	if (vd) {
		struct k3_dma_desc_sw *ds =
			container_of(vd, struct k3_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);

		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
		return 0;
	}
	c->phy->ds_run = NULL;
	c->phy->ds_done = NULL;
	return -EAGAIN;
}

static void k3_dma_tasklet(unsigned long arg)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && p->ds_done) {
			if (k3_dma_start_txd(c)) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irq(&d->lock);
	for (pch = 0; pch < d->dma_channels; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct k3_dma_chan, node);
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << pch;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irq(&c->vc.lock);
				k3_dma_start_txd(c);
				spin_unlock_irq(&c->vc.lock);
			}
		}
	}
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}

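/*
 * Report descriptor status and residue.  A descriptor still sitting on the
 * issue queue reports its full size; for the descriptor currently running,
 * the hardware's remaining byte count is added to the sizes of the LLI
 * entries that have not completed yet.
 */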
static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd && !c->cyclic) {
		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct k3_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = k3_dma_get_curr_cnt(d, p);
		clli = k3_dma_get_curr_lli(p);
		index = ((clli - ds->desc_hw_lli) /
				sizeof(struct k3_desc_hw)) + 1;
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].count;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

static void k3_dma_issue_pending(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy) {
			if (list_empty(&c->node)) {
				/* if new channel, add chan_pending */
				list_add_tail(&c->node, &d->chan_pending);
				/* check in tasklet */
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
			dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if (num != ds->desc_num - 1)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct k3_desc_hw);

	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
	ds->desc_hw[num].count = len;
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].config = ccfg;
}

static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
							 struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	ds->desc_num = num;
	return ds;
}

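/*
 * Prepare a memory-to-memory transfer.  The copy is split into LLI entries
 * of at most DMA_MAX_SIZE bytes each.  If device_config was never called,
 * a default mem-to-mem ccfg is used: incrementing source and destination,
 * 64-bit width, burst of 16.
 */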
static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	ds->size = len;
	num = 0;

	if (!c->ccfg) {
		/* default is memtomem, without calling device_config */
		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
	}

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		src += copy;
		dst += copy;
		len -= copy;
	} while (len);

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (sgl == NULL)
		return NULL;

	c->cyclic = 0;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *
k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
		       size_t buf_len, size_t period_len,
		       enum dma_transfer_direction dir,
		       unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	dma_addr_t addr, src = 0, dst = 0;
	int num = 1, since = 0;
	size_t modulo = DMA_CYCLIC_MAX_PERIOD;
	u32 en_tc2 = 0;

	dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
		__func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
		buf_len, period_len, (int)dir);

	avail = buf_len;
	if (avail > modulo)
		num += DIV_ROUND_UP(avail, modulo) - 1;

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 1;
	addr = buf_addr;
	avail = buf_len;
	total = avail;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	if (period_len < modulo)
		modulo = period_len;

	do {
		len = min_t(size_t, avail, modulo);

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = addr;
		}
		since += len;
		if (since >= period_len) {
			/* descriptor asks for TC2 interrupt on completion */
			en_tc2 = CX_CFG_NODEIRQ;
			since -= period_len;
		} else
			en_tc2 = 0;

		k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);

		addr += len;
		avail -= len;
	} while (avail);

	/* "Cyclic" == end of link points back to start of link */
	ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;

	ds->size = total;

	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

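/*
 * device_config only caches the slave configuration; it is translated into
 * the channel's ccfg by k3_dma_config_write() when a slave or cyclic
 * transfer is prepared.
 */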
static int k3_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	memcpy(&c->slave_config, cfg, sizeof(*cfg));

	return 0;
}

static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	u32 maxburst = 0, val = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (dir == DMA_DEV_TO_MEM) {
		c->ccfg = CX_CFG_DSTINCR;
		c->dev_addr = cfg->src_addr;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
	} else if (dir == DMA_MEM_TO_DEV) {
		c->ccfg = CX_CFG_SRCINCR;
		c->dev_addr = cfg->dst_addr;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
	}
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		val = __ffs(width);
		break;
	default:
		val = 3;
		break;
	}
	c->ccfg |= (val << 12) | (val << 16);

	if ((maxburst == 0) || (maxburst > 16))
		val = 15;
	else
		val = maxburst - 1;
	c->ccfg |= (val << 20) | (val << 24);
	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

	/* specific request line */
	c->ccfg |= c->vc.chan.chan_id << 4;

	return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
	struct k3_dma_desc_sw *ds =
		container_of(vd, struct k3_dma_desc_sw, vd);
	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

static int k3_dma_terminate_all(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		k3_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		if (p->ds_run) {
			vchan_terminate_vdesc(&p->ds_run->vd);
			p->ds_run = NULL;
		}
		p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void k3_dma_synchronize(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	vchan_synchronize(&c->vc);
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;
		if (p) {
			k3_dma_pause_dma(p, false);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}

	return 0;
}

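/*
 * Resume a paused channel: re-enable the physical channel if one is still
 * attached, otherwise put the channel back on the pending list so it can
 * be scheduled again once work is issued.
 */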
static int k3_dma_transfer_resume(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;
		if (p) {
			k3_dma_pause_dma(p, true);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static const struct of_device_id k3_pdma_dt_ids[] = {
	{ .compatible = "hisilicon,k3-dma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	struct k3_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request >= d->dma_requests)
		return NULL;

	return dma_get_slave_channel(&(d->chans[request].vc.chan));
}

static int k3_dma_probe(struct platform_device *op)
{
	struct k3_dma_dev *d;
	const struct of_device_id *of_id;
	struct resource *iores;
	int i, ret, irq = 0;

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32((&op->dev)->of_node,
				"dma-channels", &d->dma_channels);
		of_property_read_u32((&op->dev)->of_node,
				"dma-requests", &d->dma_requests);
	}

	d->clk = devm_clk_get(&op->dev, NULL);
	if (IS_ERR(d->clk)) {
		dev_err(&op->dev, "no dma clk\n");
		return PTR_ERR(d->clk);
	}

	irq = platform_get_irq(op, 0);
	ret = devm_request_irq(&op->dev, irq,
			k3_dma_int_handler, 0, DRIVER_NAME, d);
	if (ret)
		return ret;

	d->irq = irq;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
					LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kcalloc(&op->dev,
		d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
	if (d->phy == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct k3_dma_phy *p = &d->phy[i];

		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
	d->slave.device_tx_status = k3_dma_tx_status;
	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
	d->slave.device_issue_pending = k3_dma_issue_pending;
	d->slave.device_config = k3_dma_config;
	d->slave.device_pause = k3_dma_transfer_pause;
	d->slave.device_resume = k3_dma_transfer_resume;
	d->slave.device_terminate_all = k3_dma_terminate_all;
	d->slave.device_synchronize = k3_dma_synchronize;
	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

	/* init virtual channel */
	d->chans = devm_kcalloc(&op->dev,
		d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
	if (d->chans == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct k3_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = k3_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	k3_dma_enable_dma(d, true);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto dma_async_register_fail;

	ret = of_dma_controller_register((&op->dev)->of_node,
					k3_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
	platform_set_drvdata(op, d);
	dev_info(&op->dev, "initialized\n");

	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
dma_async_register_fail:
	clk_disable_unprepare(d->clk);
	return ret;
}

static int k3_dma_remove(struct platform_device *op)
{
	struct k3_dma_chan *c, *cn;
	struct k3_dma_dev *d = platform_get_drvdata(op);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free((&op->dev)->of_node);

	devm_free_irq(&op->dev, d->irq, d);

	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
	tasklet_kill(&d->task);
	clk_disable_unprepare(d->clk);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = k3_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			"chan %d is running, fail to suspend\n", stat);
		return -1;
	}
	k3_dma_enable_dma(d, false);
	clk_disable_unprepare(d->clk);
	return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	k3_dma_enable_dma(d, true);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &k3_dma_pmops,
		.of_match_table = k3_pdma_dt_ids,
	},
	.probe		= k3_dma_probe,
	.remove		= k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");