// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	spin_lock(&fsl_chan->vchan.lock);

	if (!fsl_chan->edesc) {
		/* terminate_all called before */
		spin_unlock(&fsl_chan->vchan.lock);
		return;
	}

	if (!fsl_chan->edesc->iscyclic) {
		list_del(&fsl_chan->edesc->vdesc.node);
		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
		fsl_chan->edesc = NULL;
		fsl_chan->status = DMA_COMPLETE;
		fsl_chan->idle = true;
	} else {
		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
	}

	if (!fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock(&fsl_chan->vchan.lock);
}
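
/*
 * Channel enable on split-register (eDMA v3/v4) controllers, as done by
 * fsl_edma3_enable_request() below: program the SBR read/write attribute,
 * set up the per-channel mux when the controller has one, then set
 * CHn_CSR[ERQ] so the channel starts reacting to hardware requests.
 */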

static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val, flags;

	flags = fsl_edma_drvflags(fsl_chan);
	val = edma_readl_chreg(fsl_chan, ch_sbr);
	if (fsl_chan->is_rxchan)
		val |= EDMA_V3_CH_SBR_RD;
	else
		val |= EDMA_V3_CH_SBR_WR;

	if (fsl_chan->is_remote)
		val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);

	edma_writel_chreg(fsl_chan, val, ch_sbr);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
		/*
		 * ch_mux: With the exception of 0, attempts to write a value
		 * already in use will be forced to 0.
		 */
		if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
			edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
	}

	val = edma_readl_chreg(fsl_chan, ch_csr);
	val |= EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_enable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val = edma_readl_chreg(fsl_chan, ch_csr);
	u32 flags;

	flags = fsl_edma_drvflags(fsl_chan);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX)
		edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);

	val &= ~EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_disable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}

static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}

void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	if (!dmamux_nr)
		return;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
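
/*
 * Worked example for fsl_edma_get_tcd_attr() below, assuming the usual eDMA
 * TCD ATTR layout (SSIZE in bits 10:8, DSIZE in bits 2:0): a 4-byte bus
 * width gives ffs(4) - 1 = 2, so the returned value is 0x0202, i.e. 32-bit
 * source and destination accesses. An undefined slave bus width falls back
 * to 4 bytes.
 */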

static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	u32 val;

	if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	val = ffs(addr_width) - 1;
	return val | (val << 8);
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}

int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
		pm_runtime_allow(fsl_chan->pd_dev);

	return 0;
}

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}
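
/*
 * fsl_edma_prep_slave_dma() below maps the peripheral FIFO address with
 * dma_map_resource() and caches the mapping in the channel; the mapping is
 * reused until the transfer direction changes or a new slave config
 * arrives, at which point fsl_edma_unprep_slave_dma() drops it.
 */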

static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}

static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	u32 nbytes = 0;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
		len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
	}

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_read_tcdreg(fsl_chan, saddr);
	else
		cur_addr = edma_read_tcdreg(fsl_chan, daddr);

	/* figure out which TCDs have finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);

		size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);

		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
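
/*
 * Residue example for fsl_edma_desc_residue() above: with two TCDs of
 * nbytes = 4 and biter = 8 each, the descriptor length is 64 bytes. If the
 * current source/destination address read from the hardware sits 16 bytes
 * into the second TCD's range, the first TCD (32 bytes) plus 16 bytes have
 * completed and the reported residue is 64 - 48 = 16 bytes.
 */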

static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	u16 csr = 0;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, the TCD registers must be loaded in
	 * big- or little-endian order depending on the eDMA engine's
	 * endianness model; the specific edma_write helpers take care
	 * of that.
	 */
	edma_write_tcdreg(fsl_chan, 0, csr);

	edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
	edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);

	edma_write_tcdreg(fsl_chan, tcd->attr, attr);
	edma_write_tcdreg(fsl_chan, tcd->soff, soff);

	edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
	edma_write_tcdreg(fsl_chan, tcd->slast, slast);

	edma_write_tcdreg(fsl_chan, tcd->citer, citer);
	edma_write_tcdreg(fsl_chan, tcd->biter, biter);
	edma_write_tcdreg(fsl_chan, tcd->doff, doff);

	edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);

	csr = le16_to_cpu(tcd->csr);

	if (fsl_chan->is_sw) {
		csr |= EDMA_TCD_CSR_START;
		tcd->csr = cpu_to_le16(csr);
	}

	/*
	 * CHn_CSR[DONE] must be cleared before enabling TCDn_CSR[ESG] on
	 * eDMA v3; eDMA v4 has no such requirement. Changing MLINK needs
	 * CHn_CSR[DONE] cleared on both eDMA v3 and eDMA v4.
	 */
	if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
	     (csr & EDMA_TCD_CSR_E_SG)) ||
	    ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
	     (csr & EDMA_TCD_CSR_E_LINK)))
		edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);

	edma_write_tcdreg(fsl_chan, tcd->csr, csr);
}

static inline
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
		       struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	struct dma_slave_config *cfg = &fsl_chan->cfg;
	u16 csr = 0;
	u32 burst;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory and let
	 * fsl_edma_set_tcd_regs do the swap when loading the registers.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(soff);

	if (fsl_chan->is_multi_fifo) {
		/* set mloff to support multiple FIFOs */
		burst = cfg->direction == DMA_DEV_TO_MEM ?
				cfg->src_maxburst : cfg->dst_maxburst;
		nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
		/* enable DMLOE/SMLOE */
		if (cfg->direction == DMA_MEM_TO_DEV) {
			nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
		} else {
			nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
		}
	}

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	if (fsl_chan->is_rxchan)
		csr |= EDMA_TCD_CSR_ACTIVE;

	if (fsl_chan->is_sw)
		csr |= EDMA_TCD_CSR_START;

	tcd->csr = cpu_to_le16(csr);
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}
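
/*
 * Cyclic prep sketch for fsl_edma_prep_dma_cyclic() below: the buffer is
 * split into buf_len / period_len TCDs that link to each other, with the
 * last one linking back to the first. Example: with a 4-byte bus width and
 * a maxburst of 8, nbytes is 32, so a 1024-byte period gives
 * citer = biter = 32 minor-loop iterations per period.
 */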

struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	bool major_int = true;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get the physical address of the next period's TCD */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = fsl_chan->is_multi_fifo ? 4 : 0;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = fsl_chan->is_multi_fifo ? 4 : 0;
			doff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = doff = 0;
			major_int = false;
		}

		fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, major_int, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
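
/*
 * Burst adjustment example for the scatter-gather path below: with a
 * 4-byte width, a maxburst of 8 (nbytes = 32) and an sg entry of 24 bytes,
 * 24 is not a multiple of 32, so the inner loop picks the largest j <= 8
 * with 24 % (j * 4) == 0, i.e. j = 6 and nbytes = 24.
 */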

struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = 0;
			doff = 0;
		}

		/*
		 * If sg_dma_len is not a multiple of the burst length, pick
		 * a smaller burst so that the whole transfer length is a
		 * multiple of the minor loop (burst length).
		 */
		if (sg_dma_len(sg) % nbytes) {
			u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
			u32 burst = (direction == DMA_DEV_TO_MEM) ?
						fsl_chan->cfg.src_maxburst :
						fsl_chan->cfg.dst_maxburst;
			int j;

			for (j = burst; j > 1; j--) {
				if (!(sg_dma_len(sg) % (j * width))) {
					nbytes = j * width;
					break;
				}
			}
			/* Set burst size as 1 if there's no suitable one */
			if (j == 1)
				nbytes = width;
		}
		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
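
/*
 * fsl_edma_prep_memcpy() below builds a single TCD: 32-byte source and
 * destination offsets, a 32-byte access size and one major iteration
 * covering the whole length, which is what the copy_align and max_seg_size
 * advertised by the driver are expected to match.
 */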

struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
						      dma_addr_t dma_dst, dma_addr_t dma_src,
						      size_t len, unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	fsl_chan->is_sw = true;
	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
		fsl_chan->is_remote = true;

	/* This matches copy_align and max_seg_size, so one TCD is enough */
	fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
			  fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
			  32, len, 0, 1, 1, 32, 0, true, true, false);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}

int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
		clk_prepare_enable(fsl_chan->clk);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
					     sizeof(struct fsl_edma_hw_tcd),
					     32, 0);
	return 0;
}

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_engine *edma = fsl_chan->edma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	if (edma->drvdata->dmamuxs)
		fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
	fsl_chan->is_sw = false;
	fsl_chan->srcid = 0;
	fsl_chan->is_remote = false;
	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
		clk_disable_unprepare(fsl_chan->clk);
}
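
/*
 * The 64-channel (mcf5441x style) block splits ERQ, EEI, INT and ERR into
 * high/low 32-bit halves, hence the extra EDMA64_*H/*L offsets picked up
 * by fsl_edma_setup_regs() below when FSL_EDMA_DRV_EDMA64 is set.
 */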

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}

/*
 * On the 32-channel Vybrid/mpc577x eDMA version, register offsets differ
 * from the 64-channel ColdFire mcf5441x eDMA.
 *
 * This function sets up the register offsets for the declared version, so
 * it must be called from xxx_edma_probe() just after the edma "version"
 * and "membase" have been set appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);

	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);

	if (is64) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}
}

MODULE_LICENSE("GPL v2");