/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))

#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)	/* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))

struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
	struct virt_dma_desc	vd;		/* Virtual descriptor */
	int			nb_desc;	/* Number of hw. descriptors */
	size_t			len;		/* Number of bytes xfered */
	dma_addr_t		first;		/* First descriptor's addr */

	/* At least one descriptor has an src/dst address not multiple of 8 */
	bool			misaligned;
	bool			cyclic;
	struct dma_pool		*desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw	*hw_desc[];	/* DMA coherent descriptors */
};

struct pxad_phy {
	int			idx;
	void __iomem		*base;
	struct pxad_chan	*vchan;
};

struct pxad_chan {
	struct virt_dma_chan	vc;		/* Virtual channel */
	u32			drcmr;		/* Requestor of the channel */
	enum pxad_chan_prio	prio;		/* Required priority of phy */
	/*
	 * At least one desc_sw in submitted or issued transfers on this channel
	 * has one address such as: addr % 8 != 0. This implies the DALGN
	 * setting on the phy.
	 */
	bool			misaligned;
	struct dma_slave_config	cfg;		/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy		*phy;
	struct dma_pool		*desc_pool;	/* Descriptors pool */
};

struct pxad_device {
	struct dma_device	slave;
	int			nr_chans;
	void __iomem		*base;
	struct pxad_phy		*phys;
	spinlock_t		phy_lock;	/* Phy association */
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dbgfs_root;
	struct dentry		*dbgfs_state;
	struct dentry		**dbgfs_chan;
#endif
};

#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)					\
	container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			 _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
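
/*
 * The requestor lines are mapped one per 32-bit DRCMR register: lines
 * 0..63 live in the bank starting at offset 0x100, higher lines in the
 * extended bank at offset 0x1000.
 */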
static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}

/*
 * Debug fs
 */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static int dbg_show_requester_chan(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i;
	u32 drcmr;

	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
	for (i = 0; i < 70; i++) {
		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
				   !!(drcmr & DRCMR_MAPVLD));
	}
	return 0;
}

static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	int burst = (dcmd >> 16) & 0x3;

	return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}

#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")

static int dbg_show_descriptors(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc, ddadr;
	struct pxad_desc_hw *desc;

	phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

	seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
	seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

		seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
			   i, phys_desc, desc);
		seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
			   dcmd,
			   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
			   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
			   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
			   PXA_DCMD_STR(ENDIAN), burst, width,
			   dcmd & PXA_DCMD_LENGTH);
		phys_desc = desc->ddadr;
	}
	if (i == max_show)
		seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
			   i, phys_desc);
	else
		seq_printf(s, "[%03d] Desc at %08lx is %s\n",
			   i, phys_desc, phys_desc == DDADR_STOP ?
			   "DDADR_STOP" : "invalid");

	return 0;
}

static int dbg_show_chan_state(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	u32 dcsr, dcmd;
	int burst, width;
	static const char * const str_prio[] = {
		"high", "normal", "low", "invalid"
	};

	dcsr = _phy_readl_relaxed(phy, DCSR);
	dcmd = _phy_readl_relaxed(phy, DCMD);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

	seq_printf(s, "DMA channel %d\n", phy->idx);
	seq_printf(s, "\tPriority : %s\n",
		   str_prio[(phy->idx & 0xf) / 4]);
	seq_printf(s, "\tUnaligned transfer bit: %s\n",
		   _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
		   "yes" : "no");
	seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
		   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
		   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
		   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
		   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
		   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
		   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
		   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
		   PXA_DCSR_STR(BUSERR));

	seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
		   dcmd,
		   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
		   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
		   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
		   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
	seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
	seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
	seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

	return 0;
}

static int dbg_show_state(struct seq_file *s, void *p)
{
	struct pxad_device *pdev = s->private;

	/* basic device status */
	seq_puts(s, "DMA engine status\n");
	seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

	return 0;
}

#define DBGFS_FUNC_DECL(name) \
static int dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, dbg_show_##name, inode->i_private); \
} \
static const struct file_operations dbg_fops_##name = { \
	.owner = THIS_MODULE, \
	.open = dbg_open_##name, \
	.llseek = seq_lseek, \
	.read = seq_read, \
	.release = single_release, \
}

DBGFS_FUNC_DECL(state);
DBGFS_FUNC_DECL(chan_state);
DBGFS_FUNC_DECL(descriptors);
DBGFS_FUNC_DECL(requester_chan);

static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
					  int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
	struct dentry *chan_reqs = NULL;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)&pdev->phys[ch];

	if (chan)
		chan_state = debugfs_create_file("state", 0400, chan, dt,
						 &dbg_fops_chan_state);
	if (chan_state)
		chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
						 &dbg_fops_descriptors);
	if (chan_descr)
		chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
						&dbg_fops_requester_chan);
	if (!chan_reqs)
		goto err_state;

	return chan;

err_state:
	debugfs_remove_recursive(chan);
	return NULL;
}

static void pxad_init_debugfs(struct pxad_device *pdev)
{
	int i;
	struct dentry *chandir;

	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
	if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
		goto err_root;

	pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
						pdev, &dbg_fops_state);
	if (!pdev->dbgfs_state)
		goto err_state;

	pdev->dbgfs_chan =
		kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_chan),
			      GFP_KERNEL);
	if (!pdev->dbgfs_chan)
		goto err_alloc;

	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
	if (!chandir)
		goto err_chandir;

	for (i = 0; i < pdev->nr_chans; i++) {
		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
		if (!pdev->dbgfs_chan[i])
			goto err_chans;
	}

	return;
err_chans:
err_chandir:
	kfree(pdev->dbgfs_chan);
err_alloc:
err_state:
	debugfs_remove_recursive(pdev->dbgfs_root);
err_root:
	pr_err("pxad: debugfs is not available\n");
}

static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
	debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif

/*
 * In the transition phase where legacy pxa handling is done at the same time as
 * mmp_dma, the DMA physical channel split between the 2 DMA providers is done
 * through legacy_reserved. Legacy code reserves DMA channels by setting the
 * corresponding bits in legacy_reserved.
 */
static u32 legacy_reserved;
static u32 legacy_unavailable;

static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			if ((i < 32) && (legacy_reserved & BIT(i)))
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				if (i < 32)
					legacy_unavailable |= BIT(i);
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}

static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;
	int i;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = pxad_drcmr(chan->drcmr);
	writel_relaxed(0, chan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (i = 0; i < 32; i++)
		if (chan->phy == &pdev->phys[i])
			legacy_unavailable &= ~BIT(i);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	BUG_ON(!chan->phy);
	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}

static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	reg = pxad_drcmr(phy->vchan->drcmr);
	writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}

static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}

static void pxad_launch_chan(struct pxad_chan *chan,
			     struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
}
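
/*
 * Transfer completion is tracked with an extra "updater" hw descriptor
 * appended to every transfer. The updater is self-referential: it copies
 * the 4-byte DDADR field of the updater descriptor itself (offset 0)
 * over its own DTADR field (offset 8). While it is pending, DTADR reads
 * back as DSADR + 8; once the copy has run, DTADR holds the DDADR value
 * instead, so is_desc_completed() can tell a finished transfer apart
 * without touching the channel registers. Chaining a further transfer
 * only requires rewriting the updater's DDADR_STOP with the bus address
 * of the next transfer's first descriptor.
 */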
static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
}

static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}

static void pxad_desc_chain(struct virt_dma_desc *vd1,
			    struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}
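
/*
 * Chaining vd1 -> vd2 rewrites the DDADR of vd1's updater (formerly
 * DDADR_STOP), so the controller falls through to vd2 without software
 * intervention:
 *
 *	vd1: hw[0] -> ... -> hw[n-1] -> updater1 -> vd2: hw'[0] -> ...
 */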
static bool pxad_try_hotchain(struct virt_dma_chan *vc,
			      struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained.
	 * A change of alignment is not allowed, and forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
			return true;
	}

	return false;
}

static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear irq */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, &phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}

static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	unsigned long flags;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock_irqsave(&chan->vc.lock, flags);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d\n",
			__func__, vd, vd->tx.cookie, is_desc_completed(vd));
		if (is_desc_completed(vd)) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if ((i < 32) && (legacy_reserved & BIT(i)))
			continue;
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  pdev->slave.dev,
					  sizeof(struct pxad_desc_hw),
					  __alignof__(struct pxad_desc_hw),
					  0);
	if (!chan->desc_pool) {
		dev_err(&chan->vc.chan.dev->device,
			"%s(): unable to allocate descriptor pool\n",
			__func__);
		return -ENOMEM;
	}

	return 1;
}

static void pxad_free_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	vchan_free_chan_resources(&chan->vc);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	BUG_ON(sw_desc->nb_desc == 0);
	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}

static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	int i;

	sw_desc = kzalloc(sizeof(*sw_desc) +
			  nb_hw_desc * sizeof(struct pxad_desc_hw *),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
						     GFP_NOWAIT, &dma);
		if (!sw_desc->hw_desc[i]) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
		sw_desc->nb_desc++;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}

static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);
	struct virt_dma_desc *vd_chained = NULL,
		*vd = container_of(tx, struct virt_dma_desc, tx);
	dma_cookie_t cookie;
	unsigned long flags;

	set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
		list_move_tail(&vd->node, &vc->desc_issued);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): txd %p[%x]: submitted (hot linked)\n",
			__func__, vd, cookie);
		goto out;
	}

	/*
	 * Fallback to placing the tx in the submitted queue
	 */
	if (!list_empty(&vc->desc_submitted)) {
		vd_chained = list_entry(vc->desc_submitted.prev,
					struct virt_dma_desc, node);
		/*
		 * Only chain the descriptors if no new misalignment is
		 * introduced. If a new misalignment is chained, let the channel
		 * stop, and be relaunched in misalign mode from the irq
		 * handler.
		 */
		if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
			pxad_desc_chain(vd_chained, vd);
		else
			vd_chained = NULL;
	}
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]: submitted (%s linked)\n",
		__func__, vd, cookie, vd_chained ? "cold" : "not");
	list_move_tail(&vd->node, &vc->desc_submitted);
	chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
	spin_unlock_irqrestore(&vc->lock, flags);
	return cookie;
}
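
/*
 * Per the dmaengine contract, a submitted descriptor stays on the
 * vchan's submitted list until the client calls issue_pending. At that
 * point the whole submitted batch moves to the issued list and is
 * either hot-chained onto a still-running phy or launched on a freshly
 * looked-up one.
 */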
static void pxad_issue_pending(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct virt_dma_desc *vd_first;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (list_empty(&chan->vc.desc_submitted))
		goto out;

	vd_first = list_first_entry(&chan->vc.desc_submitted,
				    struct virt_dma_desc, node);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);

	vchan_issue_pending(&chan->vc);
	if (!pxad_try_hotchain(&chan->vc, vd_first))
		pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
	     unsigned long tx_flags)
{
	struct dma_async_tx_descriptor *tx;
	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

	tx = vchan_tx_prep(vc, vd, tx_flags);
	tx->tx_submit = pxad_tx_submit;
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
		vc, vd, vd->tx.cookie,
		tx_flags);

	return tx;
}
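
/*
 * Translate the channel's dma_slave_config into DCMD bits and the
 * device-side address. For DMA_DEV_TO_MEM the device flow-controls the
 * source and only the target address increments; DMA_MEM_TO_DEV is the
 * mirror image. Width and burst come straight from the slave config,
 * e.g. a 4-byte-wide FIFO drained in 32-byte bursts yields
 * PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32.
 */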
static void pxad_get_config(struct pxad_chan *chan,
			    enum dma_transfer_direction dir,
			    u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
	u32 maxburst = 0, dev_addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	*dcmd = 0;
	if (dir == DMA_DEV_TO_MEM) {
		maxburst = chan->cfg.src_maxburst;
		width = chan->cfg.src_addr_width;
		dev_addr = chan->cfg.src_addr;
		*dev_src = dev_addr;
		*dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
	}
	if (dir == DMA_MEM_TO_DEV) {
		maxburst = chan->cfg.dst_maxburst;
		width = chan->cfg.dst_addr_width;
		dev_addr = chan->cfg.dst_addr;
		*dev_dst = dev_addr;
		*dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
	}
	if (dir == DMA_MEM_TO_MEM)
		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
			PXA_DCMD_INCSRCADDR;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
		__func__, dev_addr, maxburst, width, dir);

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		*dcmd |= PXA_DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		*dcmd |= PXA_DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		*dcmd |= PXA_DCMD_WIDTH4;

	if (maxburst == 8)
		*dcmd |= PXA_DCMD_BURST8;
	else if (maxburst == 16)
		*dcmd |= PXA_DCMD_BURST16;
	else if (maxburst == 32)
		*dcmd |= PXA_DCMD_BURST32;

	/* FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (chan->cfg.slave_id)
		chan->drcmr = chan->cfg.slave_id;
}

static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
		 dma_addr_t dma_dst, dma_addr_t dma_src,
		 size_t len, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw *hw_desc;
	u32 dcmd;
	unsigned int i, nb_desc = 0;
	size_t copy;

	if (!dchan || !len)
		return NULL;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
		__func__, (unsigned long)dma_dst, (unsigned long)dma_src,
		len, flags);
	pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

	nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->len = len;

	if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
	    !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
		sw_desc->misaligned = true;

	i = 0;
	do {
		hw_desc = sw_desc->hw_desc[i++];
		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
		hw_desc->dsadr = dma_src;
		hw_desc->dtadr = dma_dst;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0, i, j = 0;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dir=%d flags=%lx\n", __func__, dir, flags);

	for_each_sg(sgl, sg, sg_len, i)
		nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		dma = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		sw_desc->len += avail;

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (dma & 0x7)
				sw_desc->misaligned = true;

			sw_desc->hw_desc[j]->dcmd =
				dcmd | (PXA_DCMD_LENGTH & len);
			sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
			sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

			dma += len;
			avail -= len;
		} while (avail);
	}
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
		     dma_addr_t buf_addr, size_t len, size_t period_len,
		     enum dma_transfer_direction dir, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw **phw_desc;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0;

	if (!dchan || !len || !period_len)
		return NULL;
	if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
		dev_err(&chan->vc.chan.dev->device,
			"Unsupported direction for cyclic DMA\n");
		return NULL;
	}
	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
	    !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
		__func__, (unsigned long)buf_addr, len, period_len, dir, flags);

	nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
	nb_desc *= DIV_ROUND_UP(len, period_len);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->cyclic = true;
	sw_desc->len = len;

	phw_desc = sw_desc->hw_desc;
	dma = buf_addr;
	do {
		phw_desc[0]->dsadr = dsadr ? dsadr : dma;
		phw_desc[0]->dtadr = dtadr ? dtadr : dma;
		phw_desc[0]->dcmd = dcmd;
		phw_desc++;
		dma += period_len;
		len -= period_len;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static int pxad_config(struct dma_chan *dchan,
		       struct dma_slave_config *cfg)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	if (!dchan)
		return -EINVAL;

	chan->cfg = *cfg;
	return 0;
}

static int pxad_terminate_all(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	struct pxad_phy *phy;
	LIST_HEAD(head);

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vchan %p: terminate all\n", __func__, &chan->vc);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);

	list_for_each_entry(vd, &head, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): cancelling txd %p[%x] (completed=%d)", __func__,
			vd, vd->tx.cookie, is_desc_completed(vd));
	}

	phy = chan->phy;
	if (phy) {
		phy_disable(chan->phy);
		pxad_free_phy(chan);
		chan->phy = NULL;
		spin_lock(&pdev->phy_lock);
		phy->vchan = NULL;
		spin_unlock(&pdev->phy_lock);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}
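
/*
 * Residue computation: read the phy's current source address
 * (mem-to-dev) or target address (dev-to-mem), find the hw descriptor
 * whose [start, start + len] window contains it, then add the bytes
 * left in that descriptor and the full length of every descriptor
 * behind it. For example, three 4 KiB descriptors with the current
 * pointer 1 KiB into the second one give 3 KiB + 4 KiB = 7 KiB left.
 */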
static unsigned int pxad_residue(struct pxad_chan *chan,
				 dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pxad_desc_sw *sw_desc = NULL;
	struct pxad_desc_hw *hw_desc = NULL;
	u32 curr, start, len, end, residue = 0;
	unsigned long flags;
	bool passed = false;
	int i;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vd = vchan_find_desc(&chan->vc, cookie);
	if (!vd)
		goto out;

	sw_desc = to_pxad_sw_desc(vd);
	if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
		curr = phy_readl_relaxed(chan->phy, DSADR);
	else
		curr = phy_readl_relaxed(chan->phy, DTADR);

	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
		hw_desc = sw_desc->hw_desc[i];
		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
			start = hw_desc->dsadr;
		else
			start = hw_desc->dtadr;
		len = hw_desc->dcmd & PXA_DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor
		 * which lies inside the boundaries of the curr
		 * pointer. All descriptors that occur in the list
		 * _after_ we found that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}
	}
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}

static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}

static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static int pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	pxad_cleanup_debugfs(pdev);
	pxad_free_channels(&pdev->slave);
	dma_async_device_unregister(&pdev->slave);
	return 0;
}

static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret = 0;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq(op, i) > 0)
			nr_irq++;

	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq(op, i);
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}

static const struct of_device_id pxad_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);

static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}

static int pxad_init_dmadev(struct platform_device *op,
			    struct pxad_device *pdev,
			    unsigned int nr_phy_chans)
{
	int ret;
	unsigned int i;
	struct pxad_chan *c;

	pdev->nr_chans = nr_phy_chans;
	INIT_LIST_HEAD(&pdev->slave.channels);
	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
	pdev->slave.device_tx_status = pxad_tx_status;
	pdev->slave.device_issue_pending = pxad_issue_pending;
	pdev->slave.device_config = pxad_config;
	pdev->slave.device_terminate_all = pxad_terminate_all;

	if (op->dev.coherent_dma_mask)
		dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
	else
		dma_set_mask(&op->dev, DMA_BIT_MASK(32));

	ret = pxad_init_phys(op, pdev, nr_phy_chans);
	if (ret)
		return ret;

	for (i = 0; i < nr_phy_chans; i++) {
		c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;
		c->vc.desc_free = pxad_free_desc;
		vchan_init(&c->vc, &pdev->slave);
	}

	return dma_async_device_register(&pdev->slave);
}

static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int ret, dma_channels = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(pxad_dt_ids, &op->dev);
	if (of_id)
		of_property_read_u32(op->dev.of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channel */

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	pxad_init_debugfs(pdev);
	dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver = {
		.name = "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table = pxad_id_table,
	.probe = pxad_probe,
	.remove = pxad_remove,
};
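
/*
 * Channel filter for legacy clients which request a channel by
 * (requestor, priority) pair rather than through the device tree. A
 * hypothetical client would use it along these lines; the drcmr and
 * prio values below are illustrative only:
 *
 *	dma_cap_mask_t mask;
 *	struct pxad_param param = {
 *		.drcmr = 24,
 *		.prio = PXAD_PRIO_LOWEST,
 *	};
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
 *						&param, dev, "rx");
 */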
bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}
EXPORT_SYMBOL_GPL(pxad_filter_fn);

int pxad_toggle_reserved_channel(int legacy_channel)
{
	if (legacy_unavailable & (BIT(legacy_channel)))
		return -EBUSY;
	legacy_reserved ^= BIT(legacy_channel);
	return 0;
}
EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel);

module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");