1*6365beadSRussell King /* 2*6365beadSRussell King * SA11x0 DMAengine support 3*6365beadSRussell King * 4*6365beadSRussell King * Copyright (C) 2012 Russell King 5*6365beadSRussell King * Derived in part from arch/arm/mach-sa1100/dma.c, 6*6365beadSRussell King * Copyright (C) 2000, 2001 by Nicolas Pitre 7*6365beadSRussell King * 8*6365beadSRussell King * This program is free software; you can redistribute it and/or modify 9*6365beadSRussell King * it under the terms of the GNU General Public License version 2 as 10*6365beadSRussell King * published by the Free Software Foundation. 11*6365beadSRussell King */ 12*6365beadSRussell King #include <linux/sched.h> 13*6365beadSRussell King #include <linux/device.h> 14*6365beadSRussell King #include <linux/dmaengine.h> 15*6365beadSRussell King #include <linux/init.h> 16*6365beadSRussell King #include <linux/interrupt.h> 17*6365beadSRussell King #include <linux/kernel.h> 18*6365beadSRussell King #include <linux/module.h> 19*6365beadSRussell King #include <linux/platform_device.h> 20*6365beadSRussell King #include <linux/sa11x0-dma.h> 21*6365beadSRussell King #include <linux/slab.h> 22*6365beadSRussell King #include <linux/spinlock.h> 23*6365beadSRussell King 24*6365beadSRussell King #define NR_PHY_CHAN 6 25*6365beadSRussell King #define DMA_ALIGN 3 26*6365beadSRussell King #define DMA_MAX_SIZE 0x1fff 27*6365beadSRussell King #define DMA_CHUNK_SIZE 0x1000 28*6365beadSRussell King 29*6365beadSRussell King #define DMA_DDAR 0x00 30*6365beadSRussell King #define DMA_DCSR_S 0x04 31*6365beadSRussell King #define DMA_DCSR_C 0x08 32*6365beadSRussell King #define DMA_DCSR_R 0x0c 33*6365beadSRussell King #define DMA_DBSA 0x10 34*6365beadSRussell King #define DMA_DBTA 0x14 35*6365beadSRussell King #define DMA_DBSB 0x18 36*6365beadSRussell King #define DMA_DBTB 0x1c 37*6365beadSRussell King #define DMA_SIZE 0x20 38*6365beadSRussell King 39*6365beadSRussell King #define DCSR_RUN (1 << 0) 40*6365beadSRussell King #define DCSR_IE (1 << 1) 
41*6365beadSRussell King #define DCSR_ERROR (1 << 2) 42*6365beadSRussell King #define DCSR_DONEA (1 << 3) 43*6365beadSRussell King #define DCSR_STRTA (1 << 4) 44*6365beadSRussell King #define DCSR_DONEB (1 << 5) 45*6365beadSRussell King #define DCSR_STRTB (1 << 6) 46*6365beadSRussell King #define DCSR_BIU (1 << 7) 47*6365beadSRussell King 48*6365beadSRussell King #define DDAR_RW (1 << 0) /* 0 = W, 1 = R */ 49*6365beadSRussell King #define DDAR_E (1 << 1) /* 0 = LE, 1 = BE */ 50*6365beadSRussell King #define DDAR_BS (1 << 2) /* 0 = BS4, 1 = BS8 */ 51*6365beadSRussell King #define DDAR_DW (1 << 3) /* 0 = 8b, 1 = 16b */ 52*6365beadSRussell King #define DDAR_Ser0UDCTr (0x0 << 4) 53*6365beadSRussell King #define DDAR_Ser0UDCRc (0x1 << 4) 54*6365beadSRussell King #define DDAR_Ser1SDLCTr (0x2 << 4) 55*6365beadSRussell King #define DDAR_Ser1SDLCRc (0x3 << 4) 56*6365beadSRussell King #define DDAR_Ser1UARTTr (0x4 << 4) 57*6365beadSRussell King #define DDAR_Ser1UARTRc (0x5 << 4) 58*6365beadSRussell King #define DDAR_Ser2ICPTr (0x6 << 4) 59*6365beadSRussell King #define DDAR_Ser2ICPRc (0x7 << 4) 60*6365beadSRussell King #define DDAR_Ser3UARTTr (0x8 << 4) 61*6365beadSRussell King #define DDAR_Ser3UARTRc (0x9 << 4) 62*6365beadSRussell King #define DDAR_Ser4MCP0Tr (0xa << 4) 63*6365beadSRussell King #define DDAR_Ser4MCP0Rc (0xb << 4) 64*6365beadSRussell King #define DDAR_Ser4MCP1Tr (0xc << 4) 65*6365beadSRussell King #define DDAR_Ser4MCP1Rc (0xd << 4) 66*6365beadSRussell King #define DDAR_Ser4SSPTr (0xe << 4) 67*6365beadSRussell King #define DDAR_Ser4SSPRc (0xf << 4) 68*6365beadSRussell King 69*6365beadSRussell King struct sa11x0_dma_sg { 70*6365beadSRussell King u32 addr; 71*6365beadSRussell King u32 len; 72*6365beadSRussell King }; 73*6365beadSRussell King 74*6365beadSRussell King struct sa11x0_dma_desc { 75*6365beadSRussell King struct dma_async_tx_descriptor tx; 76*6365beadSRussell King u32 ddar; 77*6365beadSRussell King size_t size; 78*6365beadSRussell King 
79*6365beadSRussell King /* maybe protected by c->lock */ 80*6365beadSRussell King struct list_head node; 81*6365beadSRussell King unsigned sglen; 82*6365beadSRussell King struct sa11x0_dma_sg sg[0]; 83*6365beadSRussell King }; 84*6365beadSRussell King 85*6365beadSRussell King struct sa11x0_dma_phy; 86*6365beadSRussell King 87*6365beadSRussell King struct sa11x0_dma_chan { 88*6365beadSRussell King struct dma_chan chan; 89*6365beadSRussell King spinlock_t lock; 90*6365beadSRussell King dma_cookie_t lc; 91*6365beadSRussell King 92*6365beadSRussell King /* protected by c->lock */ 93*6365beadSRussell King struct sa11x0_dma_phy *phy; 94*6365beadSRussell King enum dma_status status; 95*6365beadSRussell King struct list_head desc_submitted; 96*6365beadSRussell King struct list_head desc_issued; 97*6365beadSRussell King 98*6365beadSRussell King /* protected by d->lock */ 99*6365beadSRussell King struct list_head node; 100*6365beadSRussell King 101*6365beadSRussell King u32 ddar; 102*6365beadSRussell King const char *name; 103*6365beadSRussell King }; 104*6365beadSRussell King 105*6365beadSRussell King struct sa11x0_dma_phy { 106*6365beadSRussell King void __iomem *base; 107*6365beadSRussell King struct sa11x0_dma_dev *dev; 108*6365beadSRussell King unsigned num; 109*6365beadSRussell King 110*6365beadSRussell King struct sa11x0_dma_chan *vchan; 111*6365beadSRussell King 112*6365beadSRussell King /* Protected by c->lock */ 113*6365beadSRussell King unsigned sg_load; 114*6365beadSRussell King struct sa11x0_dma_desc *txd_load; 115*6365beadSRussell King unsigned sg_done; 116*6365beadSRussell King struct sa11x0_dma_desc *txd_done; 117*6365beadSRussell King #ifdef CONFIG_PM_SLEEP 118*6365beadSRussell King u32 dbs[2]; 119*6365beadSRussell King u32 dbt[2]; 120*6365beadSRussell King u32 dcsr; 121*6365beadSRussell King #endif 122*6365beadSRussell King }; 123*6365beadSRussell King 124*6365beadSRussell King struct sa11x0_dma_dev { 125*6365beadSRussell King struct dma_device slave; 
126*6365beadSRussell King void __iomem *base; 127*6365beadSRussell King spinlock_t lock; 128*6365beadSRussell King struct tasklet_struct task; 129*6365beadSRussell King struct list_head chan_pending; 130*6365beadSRussell King struct list_head desc_complete; 131*6365beadSRussell King struct sa11x0_dma_phy phy[NR_PHY_CHAN]; 132*6365beadSRussell King }; 133*6365beadSRussell King 134*6365beadSRussell King static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) 135*6365beadSRussell King { 136*6365beadSRussell King return container_of(chan, struct sa11x0_dma_chan, chan); 137*6365beadSRussell King } 138*6365beadSRussell King 139*6365beadSRussell King static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) 140*6365beadSRussell King { 141*6365beadSRussell King return container_of(dmadev, struct sa11x0_dma_dev, slave); 142*6365beadSRussell King } 143*6365beadSRussell King 144*6365beadSRussell King static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx) 145*6365beadSRussell King { 146*6365beadSRussell King return container_of(tx, struct sa11x0_dma_desc, tx); 147*6365beadSRussell King } 148*6365beadSRussell King 149*6365beadSRussell King static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) 150*6365beadSRussell King { 151*6365beadSRussell King if (list_empty(&c->desc_issued)) 152*6365beadSRussell King return NULL; 153*6365beadSRussell King 154*6365beadSRussell King return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node); 155*6365beadSRussell King } 156*6365beadSRussell King 157*6365beadSRussell King static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) 158*6365beadSRussell King { 159*6365beadSRussell King list_del(&txd->node); 160*6365beadSRussell King p->txd_load = txd; 161*6365beadSRussell King p->sg_load = 0; 162*6365beadSRussell King 163*6365beadSRussell King dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", 
164*6365beadSRussell King p->num, txd, txd->tx.cookie, txd->ddar); 165*6365beadSRussell King } 166*6365beadSRussell King 167*6365beadSRussell King static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, 168*6365beadSRussell King struct sa11x0_dma_chan *c) 169*6365beadSRussell King { 170*6365beadSRussell King struct sa11x0_dma_desc *txd = p->txd_load; 171*6365beadSRussell King struct sa11x0_dma_sg *sg; 172*6365beadSRussell King void __iomem *base = p->base; 173*6365beadSRussell King unsigned dbsx, dbtx; 174*6365beadSRussell King u32 dcsr; 175*6365beadSRussell King 176*6365beadSRussell King if (!txd) 177*6365beadSRussell King return; 178*6365beadSRussell King 179*6365beadSRussell King dcsr = readl_relaxed(base + DMA_DCSR_R); 180*6365beadSRussell King 181*6365beadSRussell King /* Don't try to load the next transfer if both buffers are started */ 182*6365beadSRussell King if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB)) 183*6365beadSRussell King return; 184*6365beadSRussell King 185*6365beadSRussell King if (p->sg_load == txd->sglen) { 186*6365beadSRussell King struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); 187*6365beadSRussell King 188*6365beadSRussell King /* 189*6365beadSRussell King * We have reached the end of the current descriptor. 190*6365beadSRussell King * Peek at the next descriptor, and if compatible with 191*6365beadSRussell King * the current, start processing it. 
192*6365beadSRussell King */ 193*6365beadSRussell King if (txn && txn->ddar == txd->ddar) { 194*6365beadSRussell King txd = txn; 195*6365beadSRussell King sa11x0_dma_start_desc(p, txn); 196*6365beadSRussell King } else { 197*6365beadSRussell King p->txd_load = NULL; 198*6365beadSRussell King return; 199*6365beadSRussell King } 200*6365beadSRussell King } 201*6365beadSRussell King 202*6365beadSRussell King sg = &txd->sg[p->sg_load++]; 203*6365beadSRussell King 204*6365beadSRussell King /* Select buffer to load according to channel status */ 205*6365beadSRussell King if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) || 206*6365beadSRussell King ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) { 207*6365beadSRussell King dbsx = DMA_DBSA; 208*6365beadSRussell King dbtx = DMA_DBTA; 209*6365beadSRussell King dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN; 210*6365beadSRussell King } else { 211*6365beadSRussell King dbsx = DMA_DBSB; 212*6365beadSRussell King dbtx = DMA_DBTB; 213*6365beadSRussell King dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN; 214*6365beadSRussell King } 215*6365beadSRussell King 216*6365beadSRussell King writel_relaxed(sg->addr, base + dbsx); 217*6365beadSRussell King writel_relaxed(sg->len, base + dbtx); 218*6365beadSRussell King writel(dcsr, base + DMA_DCSR_S); 219*6365beadSRussell King 220*6365beadSRussell King dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n", 221*6365beadSRussell King p->num, dcsr, 222*6365beadSRussell King 'A' + (dbsx == DMA_DBSB), sg->addr, 223*6365beadSRussell King 'A' + (dbtx == DMA_DBTB), sg->len); 224*6365beadSRussell King } 225*6365beadSRussell King 226*6365beadSRussell King static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p, 227*6365beadSRussell King struct sa11x0_dma_chan *c) 228*6365beadSRussell King { 229*6365beadSRussell King struct sa11x0_dma_desc *txd = p->txd_done; 230*6365beadSRussell King 231*6365beadSRussell King if (++p->sg_done == txd->sglen) { 232*6365beadSRussell King 
struct sa11x0_dma_dev *d = p->dev; 233*6365beadSRussell King 234*6365beadSRussell King dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n", 235*6365beadSRussell King p->num, p->txd_done, p->txd_done->tx.cookie); 236*6365beadSRussell King 237*6365beadSRussell King c->lc = txd->tx.cookie; 238*6365beadSRussell King 239*6365beadSRussell King spin_lock(&d->lock); 240*6365beadSRussell King list_add_tail(&txd->node, &d->desc_complete); 241*6365beadSRussell King spin_unlock(&d->lock); 242*6365beadSRussell King 243*6365beadSRussell King p->sg_done = 0; 244*6365beadSRussell King p->txd_done = p->txd_load; 245*6365beadSRussell King 246*6365beadSRussell King tasklet_schedule(&d->task); 247*6365beadSRussell King } 248*6365beadSRussell King 249*6365beadSRussell King sa11x0_dma_start_sg(p, c); 250*6365beadSRussell King } 251*6365beadSRussell King 252*6365beadSRussell King static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) 253*6365beadSRussell King { 254*6365beadSRussell King struct sa11x0_dma_phy *p = dev_id; 255*6365beadSRussell King struct sa11x0_dma_dev *d = p->dev; 256*6365beadSRussell King struct sa11x0_dma_chan *c; 257*6365beadSRussell King u32 dcsr; 258*6365beadSRussell King 259*6365beadSRussell King dcsr = readl_relaxed(p->base + DMA_DCSR_R); 260*6365beadSRussell King if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB))) 261*6365beadSRussell King return IRQ_NONE; 262*6365beadSRussell King 263*6365beadSRussell King /* Clear reported status bits */ 264*6365beadSRussell King writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB), 265*6365beadSRussell King p->base + DMA_DCSR_C); 266*6365beadSRussell King 267*6365beadSRussell King dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr); 268*6365beadSRussell King 269*6365beadSRussell King if (dcsr & DCSR_ERROR) { 270*6365beadSRussell King dev_err(d->slave.dev, "pchan %u: error. 
DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n", 271*6365beadSRussell King p->num, dcsr, 272*6365beadSRussell King readl_relaxed(p->base + DMA_DDAR), 273*6365beadSRussell King readl_relaxed(p->base + DMA_DBSA), 274*6365beadSRussell King readl_relaxed(p->base + DMA_DBTA), 275*6365beadSRussell King readl_relaxed(p->base + DMA_DBSB), 276*6365beadSRussell King readl_relaxed(p->base + DMA_DBTB)); 277*6365beadSRussell King } 278*6365beadSRussell King 279*6365beadSRussell King c = p->vchan; 280*6365beadSRussell King if (c) { 281*6365beadSRussell King unsigned long flags; 282*6365beadSRussell King 283*6365beadSRussell King spin_lock_irqsave(&c->lock, flags); 284*6365beadSRussell King /* 285*6365beadSRussell King * Now that we're holding the lock, check that the vchan 286*6365beadSRussell King * really is associated with this pchan before touching the 287*6365beadSRussell King * hardware. This should always succeed, because we won't 288*6365beadSRussell King * change p->vchan or c->phy while the channel is actively 289*6365beadSRussell King * transferring. 
290*6365beadSRussell King */ 291*6365beadSRussell King if (c->phy == p) { 292*6365beadSRussell King if (dcsr & DCSR_DONEA) 293*6365beadSRussell King sa11x0_dma_complete(p, c); 294*6365beadSRussell King if (dcsr & DCSR_DONEB) 295*6365beadSRussell King sa11x0_dma_complete(p, c); 296*6365beadSRussell King } 297*6365beadSRussell King spin_unlock_irqrestore(&c->lock, flags); 298*6365beadSRussell King } 299*6365beadSRussell King 300*6365beadSRussell King return IRQ_HANDLED; 301*6365beadSRussell King } 302*6365beadSRussell King 303*6365beadSRussell King static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c) 304*6365beadSRussell King { 305*6365beadSRussell King struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c); 306*6365beadSRussell King 307*6365beadSRussell King /* If the issued list is empty, we have no further txds to process */ 308*6365beadSRussell King if (txd) { 309*6365beadSRussell King struct sa11x0_dma_phy *p = c->phy; 310*6365beadSRussell King 311*6365beadSRussell King sa11x0_dma_start_desc(p, txd); 312*6365beadSRussell King p->txd_done = txd; 313*6365beadSRussell King p->sg_done = 0; 314*6365beadSRussell King 315*6365beadSRussell King /* The channel should not have any transfers started */ 316*6365beadSRussell King WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) & 317*6365beadSRussell King (DCSR_STRTA | DCSR_STRTB)); 318*6365beadSRussell King 319*6365beadSRussell King /* Clear the run and start bits before changing DDAR */ 320*6365beadSRussell King writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB, 321*6365beadSRussell King p->base + DMA_DCSR_C); 322*6365beadSRussell King writel_relaxed(txd->ddar, p->base + DMA_DDAR); 323*6365beadSRussell King 324*6365beadSRussell King /* Try to start both buffers */ 325*6365beadSRussell King sa11x0_dma_start_sg(p, c); 326*6365beadSRussell King sa11x0_dma_start_sg(p, c); 327*6365beadSRussell King } 328*6365beadSRussell King } 329*6365beadSRussell King 330*6365beadSRussell King static void sa11x0_dma_tasklet(unsigned long 
arg) 331*6365beadSRussell King { 332*6365beadSRussell King struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; 333*6365beadSRussell King struct sa11x0_dma_phy *p; 334*6365beadSRussell King struct sa11x0_dma_chan *c; 335*6365beadSRussell King struct sa11x0_dma_desc *txd, *txn; 336*6365beadSRussell King LIST_HEAD(head); 337*6365beadSRussell King unsigned pch, pch_alloc = 0; 338*6365beadSRussell King 339*6365beadSRussell King dev_dbg(d->slave.dev, "tasklet enter\n"); 340*6365beadSRussell King 341*6365beadSRussell King /* Get the completed tx descriptors */ 342*6365beadSRussell King spin_lock_irq(&d->lock); 343*6365beadSRussell King list_splice_init(&d->desc_complete, &head); 344*6365beadSRussell King spin_unlock_irq(&d->lock); 345*6365beadSRussell King 346*6365beadSRussell King list_for_each_entry(txd, &head, node) { 347*6365beadSRussell King c = to_sa11x0_dma_chan(txd->tx.chan); 348*6365beadSRussell King 349*6365beadSRussell King dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n", 350*6365beadSRussell King c, txd, txd->tx.cookie); 351*6365beadSRussell King 352*6365beadSRussell King spin_lock_irq(&c->lock); 353*6365beadSRussell King p = c->phy; 354*6365beadSRussell King if (p) { 355*6365beadSRussell King if (!p->txd_done) 356*6365beadSRussell King sa11x0_dma_start_txd(c); 357*6365beadSRussell King if (!p->txd_done) { 358*6365beadSRussell King /* No current txd associated with this channel */ 359*6365beadSRussell King dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); 360*6365beadSRussell King 361*6365beadSRussell King /* Mark this channel free */ 362*6365beadSRussell King c->phy = NULL; 363*6365beadSRussell King p->vchan = NULL; 364*6365beadSRussell King } 365*6365beadSRussell King } 366*6365beadSRussell King spin_unlock_irq(&c->lock); 367*6365beadSRussell King } 368*6365beadSRussell King 369*6365beadSRussell King spin_lock_irq(&d->lock); 370*6365beadSRussell King for (pch = 0; pch < NR_PHY_CHAN; pch++) { 371*6365beadSRussell King p = &d->phy[pch]; 
372*6365beadSRussell King 373*6365beadSRussell King if (p->vchan == NULL && !list_empty(&d->chan_pending)) { 374*6365beadSRussell King c = list_first_entry(&d->chan_pending, 375*6365beadSRussell King struct sa11x0_dma_chan, node); 376*6365beadSRussell King list_del_init(&c->node); 377*6365beadSRussell King 378*6365beadSRussell King pch_alloc |= 1 << pch; 379*6365beadSRussell King 380*6365beadSRussell King /* Mark this channel allocated */ 381*6365beadSRussell King p->vchan = c; 382*6365beadSRussell King 383*6365beadSRussell King dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c); 384*6365beadSRussell King } 385*6365beadSRussell King } 386*6365beadSRussell King spin_unlock_irq(&d->lock); 387*6365beadSRussell King 388*6365beadSRussell King for (pch = 0; pch < NR_PHY_CHAN; pch++) { 389*6365beadSRussell King if (pch_alloc & (1 << pch)) { 390*6365beadSRussell King p = &d->phy[pch]; 391*6365beadSRussell King c = p->vchan; 392*6365beadSRussell King 393*6365beadSRussell King spin_lock_irq(&c->lock); 394*6365beadSRussell King c->phy = p; 395*6365beadSRussell King 396*6365beadSRussell King sa11x0_dma_start_txd(c); 397*6365beadSRussell King spin_unlock_irq(&c->lock); 398*6365beadSRussell King } 399*6365beadSRussell King } 400*6365beadSRussell King 401*6365beadSRussell King /* Now free the completed tx descriptor, and call their callbacks */ 402*6365beadSRussell King list_for_each_entry_safe(txd, txn, &head, node) { 403*6365beadSRussell King dma_async_tx_callback callback = txd->tx.callback; 404*6365beadSRussell King void *callback_param = txd->tx.callback_param; 405*6365beadSRussell King 406*6365beadSRussell King dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n", 407*6365beadSRussell King txd, txd->tx.cookie); 408*6365beadSRussell King 409*6365beadSRussell King kfree(txd); 410*6365beadSRussell King 411*6365beadSRussell King if (callback) 412*6365beadSRussell King callback(callback_param); 413*6365beadSRussell King } 414*6365beadSRussell King 
415*6365beadSRussell King dev_dbg(d->slave.dev, "tasklet exit\n"); 416*6365beadSRussell King } 417*6365beadSRussell King 418*6365beadSRussell King 419*6365beadSRussell King static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head) 420*6365beadSRussell King { 421*6365beadSRussell King struct sa11x0_dma_desc *txd, *txn; 422*6365beadSRussell King 423*6365beadSRussell King list_for_each_entry_safe(txd, txn, head, node) { 424*6365beadSRussell King dev_dbg(d->slave.dev, "txd %p: freeing\n", txd); 425*6365beadSRussell King kfree(txd); 426*6365beadSRussell King } 427*6365beadSRussell King } 428*6365beadSRussell King 429*6365beadSRussell King static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) 430*6365beadSRussell King { 431*6365beadSRussell King return 0; 432*6365beadSRussell King } 433*6365beadSRussell King 434*6365beadSRussell King static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) 435*6365beadSRussell King { 436*6365beadSRussell King struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 437*6365beadSRussell King struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 438*6365beadSRussell King unsigned long flags; 439*6365beadSRussell King LIST_HEAD(head); 440*6365beadSRussell King 441*6365beadSRussell King spin_lock_irqsave(&c->lock, flags); 442*6365beadSRussell King spin_lock(&d->lock); 443*6365beadSRussell King list_del_init(&c->node); 444*6365beadSRussell King spin_unlock(&d->lock); 445*6365beadSRussell King 446*6365beadSRussell King list_splice_tail_init(&c->desc_submitted, &head); 447*6365beadSRussell King list_splice_tail_init(&c->desc_issued, &head); 448*6365beadSRussell King spin_unlock_irqrestore(&c->lock, flags); 449*6365beadSRussell King 450*6365beadSRussell King sa11x0_dma_desc_free(d, &head); 451*6365beadSRussell King } 452*6365beadSRussell King 453*6365beadSRussell King static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) 454*6365beadSRussell King { 455*6365beadSRussell King unsigned reg; 
456*6365beadSRussell King u32 dcsr; 457*6365beadSRussell King 458*6365beadSRussell King dcsr = readl_relaxed(p->base + DMA_DCSR_R); 459*6365beadSRussell King 460*6365beadSRussell King if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA || 461*6365beadSRussell King (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU) 462*6365beadSRussell King reg = DMA_DBSA; 463*6365beadSRussell King else 464*6365beadSRussell King reg = DMA_DBSB; 465*6365beadSRussell King 466*6365beadSRussell King return readl_relaxed(p->base + reg); 467*6365beadSRussell King } 468*6365beadSRussell King 469*6365beadSRussell King static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, 470*6365beadSRussell King dma_cookie_t cookie, struct dma_tx_state *state) 471*6365beadSRussell King { 472*6365beadSRussell King struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 473*6365beadSRussell King struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 474*6365beadSRussell King struct sa11x0_dma_phy *p; 475*6365beadSRussell King struct sa11x0_dma_desc *txd; 476*6365beadSRussell King dma_cookie_t last_used, last_complete; 477*6365beadSRussell King unsigned long flags; 478*6365beadSRussell King enum dma_status ret; 479*6365beadSRussell King size_t bytes = 0; 480*6365beadSRussell King 481*6365beadSRussell King last_used = c->chan.cookie; 482*6365beadSRussell King last_complete = c->lc; 483*6365beadSRussell King 484*6365beadSRussell King ret = dma_async_is_complete(cookie, last_complete, last_used); 485*6365beadSRussell King if (ret == DMA_SUCCESS) { 486*6365beadSRussell King dma_set_tx_state(state, last_complete, last_used, 0); 487*6365beadSRussell King return ret; 488*6365beadSRussell King } 489*6365beadSRussell King 490*6365beadSRussell King spin_lock_irqsave(&c->lock, flags); 491*6365beadSRussell King p = c->phy; 492*6365beadSRussell King ret = c->status; 493*6365beadSRussell King if (p) { 494*6365beadSRussell King dma_addr_t addr = sa11x0_dma_pos(p); 495*6365beadSRussell King 496*6365beadSRussell King 
dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); 497*6365beadSRussell King 498*6365beadSRussell King txd = p->txd_done; 499*6365beadSRussell King if (txd) { 500*6365beadSRussell King unsigned i; 501*6365beadSRussell King 502*6365beadSRussell King for (i = 0; i < txd->sglen; i++) { 503*6365beadSRussell King dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", 504*6365beadSRussell King i, txd->sg[i].addr, txd->sg[i].len); 505*6365beadSRussell King if (addr >= txd->sg[i].addr && 506*6365beadSRussell King addr < txd->sg[i].addr + txd->sg[i].len) { 507*6365beadSRussell King unsigned len; 508*6365beadSRussell King 509*6365beadSRussell King len = txd->sg[i].len - 510*6365beadSRussell King (addr - txd->sg[i].addr); 511*6365beadSRussell King dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n", 512*6365beadSRussell King i, len); 513*6365beadSRussell King bytes += len; 514*6365beadSRussell King i++; 515*6365beadSRussell King break; 516*6365beadSRussell King } 517*6365beadSRussell King } 518*6365beadSRussell King for (; i < txd->sglen; i++) { 519*6365beadSRussell King dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n", 520*6365beadSRussell King i, txd->sg[i].addr, txd->sg[i].len); 521*6365beadSRussell King bytes += txd->sg[i].len; 522*6365beadSRussell King } 523*6365beadSRussell King } 524*6365beadSRussell King if (txd != p->txd_load && p->txd_load) 525*6365beadSRussell King bytes += p->txd_load->size; 526*6365beadSRussell King } 527*6365beadSRussell King list_for_each_entry(txd, &c->desc_issued, node) { 528*6365beadSRussell King bytes += txd->size; 529*6365beadSRussell King } 530*6365beadSRussell King spin_unlock_irqrestore(&c->lock, flags); 531*6365beadSRussell King 532*6365beadSRussell King dma_set_tx_state(state, last_complete, last_used, bytes); 533*6365beadSRussell King 534*6365beadSRussell King dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes); 535*6365beadSRussell King 536*6365beadSRussell King return ret; 537*6365beadSRussell King } 538*6365beadSRussell King 
539*6365beadSRussell King /* 540*6365beadSRussell King * Move pending txds to the issued list, and re-init pending list. 541*6365beadSRussell King * If not already pending, add this channel to the list of pending 542*6365beadSRussell King * channels and trigger the tasklet to run. 543*6365beadSRussell King */ 544*6365beadSRussell King static void sa11x0_dma_issue_pending(struct dma_chan *chan) 545*6365beadSRussell King { 546*6365beadSRussell King struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 547*6365beadSRussell King struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 548*6365beadSRussell King unsigned long flags; 549*6365beadSRussell King 550*6365beadSRussell King spin_lock_irqsave(&c->lock, flags); 551*6365beadSRussell King list_splice_tail_init(&c->desc_submitted, &c->desc_issued); 552*6365beadSRussell King if (!list_empty(&c->desc_issued)) { 553*6365beadSRussell King spin_lock(&d->lock); 554*6365beadSRussell King if (!c->phy && list_empty(&c->node)) { 555*6365beadSRussell King list_add_tail(&c->node, &d->chan_pending); 556*6365beadSRussell King tasklet_schedule(&d->task); 557*6365beadSRussell King dev_dbg(d->slave.dev, "vchan %p: issued\n", c); 558*6365beadSRussell King } 559*6365beadSRussell King spin_unlock(&d->lock); 560*6365beadSRussell King } else 561*6365beadSRussell King dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c); 562*6365beadSRussell King spin_unlock_irqrestore(&c->lock, flags); 563*6365beadSRussell King } 564*6365beadSRussell King 565*6365beadSRussell King static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx) 566*6365beadSRussell King { 567*6365beadSRussell King struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan); 568*6365beadSRussell King struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx); 569*6365beadSRussell King unsigned long flags; 570*6365beadSRussell King 571*6365beadSRussell King spin_lock_irqsave(&c->lock, flags); 572*6365beadSRussell King c->chan.cookie += 1; 573*6365beadSRussell King if 
(c->chan.cookie < 0) 574*6365beadSRussell King c->chan.cookie = 1; 575*6365beadSRussell King txd->tx.cookie = c->chan.cookie; 576*6365beadSRussell King 577*6365beadSRussell King list_add_tail(&txd->node, &c->desc_submitted); 578*6365beadSRussell King spin_unlock_irqrestore(&c->lock, flags); 579*6365beadSRussell King 580*6365beadSRussell King dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n", 581*6365beadSRussell King c, txd, txd->tx.cookie); 582*6365beadSRussell King 583*6365beadSRussell King return txd->tx.cookie; 584*6365beadSRussell King } 585*6365beadSRussell King 586*6365beadSRussell King static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( 587*6365beadSRussell King struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen, 588*6365beadSRussell King enum dma_transfer_direction dir, unsigned long flags) 589*6365beadSRussell King { 590*6365beadSRussell King struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 591*6365beadSRussell King struct sa11x0_dma_desc *txd; 592*6365beadSRussell King struct scatterlist *sgent; 593*6365beadSRussell King unsigned i, j = sglen; 594*6365beadSRussell King size_t size = 0; 595*6365beadSRussell King 596*6365beadSRussell King /* SA11x0 channels can only operate in their native direction */ 597*6365beadSRussell King if (dir != (c->ddar & DDAR_RW ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { 598*6365beadSRussell King dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", 599*6365beadSRussell King c, c->ddar, dir); 600*6365beadSRussell King return NULL; 601*6365beadSRussell King } 602*6365beadSRussell King 603*6365beadSRussell King /* Do not allow zero-sized txds */ 604*6365beadSRussell King if (sglen == 0) 605*6365beadSRussell King return NULL; 606*6365beadSRussell King 607*6365beadSRussell King for_each_sg(sg, sgent, sglen, i) { 608*6365beadSRussell King dma_addr_t addr = sg_dma_address(sgent); 609*6365beadSRussell King unsigned int len = sg_dma_len(sgent); 610*6365beadSRussell King 611*6365beadSRussell King if (len > DMA_MAX_SIZE) 612*6365beadSRussell King j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; 613*6365beadSRussell King if (addr & DMA_ALIGN) { 614*6365beadSRussell King dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", 615*6365beadSRussell King c, addr); 616*6365beadSRussell King return NULL; 617*6365beadSRussell King } 618*6365beadSRussell King } 619*6365beadSRussell King 620*6365beadSRussell King txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); 621*6365beadSRussell King if (!txd) { 622*6365beadSRussell King dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c); 623*6365beadSRussell King return NULL; 624*6365beadSRussell King } 625*6365beadSRussell King 626*6365beadSRussell King j = 0; 627*6365beadSRussell King for_each_sg(sg, sgent, sglen, i) { 628*6365beadSRussell King dma_addr_t addr = sg_dma_address(sgent); 629*6365beadSRussell King unsigned len = sg_dma_len(sgent); 630*6365beadSRussell King 631*6365beadSRussell King size += len; 632*6365beadSRussell King 633*6365beadSRussell King do { 634*6365beadSRussell King unsigned tlen = len; 635*6365beadSRussell King 636*6365beadSRussell King /* 637*6365beadSRussell King * Check whether the transfer will fit. 
If not, try 638*6365beadSRussell King * to split the transfer up such that we end up with 639*6365beadSRussell King * equal chunks - but make sure that we preserve the 640*6365beadSRussell King * alignment. This avoids small segments. 641*6365beadSRussell King */ 642*6365beadSRussell King if (tlen > DMA_MAX_SIZE) { 643*6365beadSRussell King unsigned mult = DIV_ROUND_UP(tlen, 644*6365beadSRussell King DMA_MAX_SIZE & ~DMA_ALIGN); 645*6365beadSRussell King 646*6365beadSRussell King tlen = (tlen / mult) & ~DMA_ALIGN; 647*6365beadSRussell King } 648*6365beadSRussell King 649*6365beadSRussell King txd->sg[j].addr = addr; 650*6365beadSRussell King txd->sg[j].len = tlen; 651*6365beadSRussell King 652*6365beadSRussell King addr += tlen; 653*6365beadSRussell King len -= tlen; 654*6365beadSRussell King j++; 655*6365beadSRussell King } while (len); 656*6365beadSRussell King } 657*6365beadSRussell King 658*6365beadSRussell King dma_async_tx_descriptor_init(&txd->tx, &c->chan); 659*6365beadSRussell King txd->tx.flags = flags; 660*6365beadSRussell King txd->tx.tx_submit = sa11x0_dma_tx_submit; 661*6365beadSRussell King txd->ddar = c->ddar; 662*6365beadSRussell King txd->size = size; 663*6365beadSRussell King txd->sglen = j; 664*6365beadSRussell King 665*6365beadSRussell King dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", 666*6365beadSRussell King c, txd, txd->size, txd->sglen); 667*6365beadSRussell King 668*6365beadSRussell King return &txd->tx; 669*6365beadSRussell King } 670*6365beadSRussell King 671*6365beadSRussell King static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) 672*6365beadSRussell King { 673*6365beadSRussell King u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); 674*6365beadSRussell King dma_addr_t addr; 675*6365beadSRussell King enum dma_slave_buswidth width; 676*6365beadSRussell King u32 maxburst; 677*6365beadSRussell King 678*6365beadSRussell King if (ddar & DDAR_RW) { 679*6365beadSRussell King addr = 
cfg->src_addr; 680*6365beadSRussell King width = cfg->src_addr_width; 681*6365beadSRussell King maxburst = cfg->src_maxburst; 682*6365beadSRussell King } else { 683*6365beadSRussell King addr = cfg->dst_addr; 684*6365beadSRussell King width = cfg->dst_addr_width; 685*6365beadSRussell King maxburst = cfg->dst_maxburst; 686*6365beadSRussell King } 687*6365beadSRussell King 688*6365beadSRussell King if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE && 689*6365beadSRussell King width != DMA_SLAVE_BUSWIDTH_2_BYTES) || 690*6365beadSRussell King (maxburst != 4 && maxburst != 8)) 691*6365beadSRussell King return -EINVAL; 692*6365beadSRussell King 693*6365beadSRussell King if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) 694*6365beadSRussell King ddar |= DDAR_DW; 695*6365beadSRussell King if (maxburst == 8) 696*6365beadSRussell King ddar |= DDAR_BS; 697*6365beadSRussell King 698*6365beadSRussell King dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", 699*6365beadSRussell King c, addr, width, maxburst); 700*6365beadSRussell King 701*6365beadSRussell King c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; 702*6365beadSRussell King 703*6365beadSRussell King return 0; 704*6365beadSRussell King } 705*6365beadSRussell King 706*6365beadSRussell King static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 707*6365beadSRussell King unsigned long arg) 708*6365beadSRussell King { 709*6365beadSRussell King struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 710*6365beadSRussell King struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 711*6365beadSRussell King struct sa11x0_dma_phy *p; 712*6365beadSRussell King LIST_HEAD(head); 713*6365beadSRussell King unsigned long flags; 714*6365beadSRussell King int ret; 715*6365beadSRussell King 716*6365beadSRussell King switch (cmd) { 717*6365beadSRussell King case DMA_SLAVE_CONFIG: 718*6365beadSRussell King return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); 
/*
 * dmaengine device_control entry point: dispatch slave-config,
 * terminate-all, pause and resume requests for a virtual channel.
 *
 * Returns 0 on success, the sa11x0_dma_slave_config() result for
 * DMA_SLAVE_CONFIG, or -ENXIO for an unsupported command.
 *
 * Locking: c->lock protects the vchan state; d->lock is nested inside
 * it when the pchan<->vchan binding or the device-wide lists change.
 */
static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
		/* Clear the tx descriptor lists */
		spin_lock_irqsave(&c->lock, flags);
		list_splice_tail_init(&c->desc_submitted, &head);
		list_splice_tail_init(&c->desc_issued, &head);

		p = c->phy;
		if (p) {
			struct sa11x0_dma_desc *txd, *txn;

			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
			/* vchan is assigned to a pchan - stop the channel */
			writel(DCSR_RUN | DCSR_IE |
			       DCSR_STRTA | DCSR_DONEA |
			       DCSR_STRTB | DCSR_DONEB,
			       p->base + DMA_DCSR_C);

			/* Collect this vchan's completed descriptors as well */
			list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
				if (txd->tx.chan == &c->chan)
					list_move(&txd->node, &head);

			/*
			 * Reclaim the in-flight descriptors.  txd_load may
			 * be the same descriptor as txd_done, so take care
			 * to queue it only once.
			 */
			if (p->txd_load) {
				if (p->txd_load != p->txd_done)
					list_add_tail(&p->txd_load->node, &head);
				p->txd_load = NULL;
			}
			if (p->txd_done) {
				list_add_tail(&p->txd_done->node, &head);
				p->txd_done = NULL;
			}
			c->phy = NULL;
			spin_lock(&d->lock);
			p->vchan = NULL;
			spin_unlock(&d->lock);
			/* Let the tasklet hand the freed pchan to a waiter */
			tasklet_schedule(&d->task);
		}
		spin_unlock_irqrestore(&c->lock, flags);
		/* Free everything we collected, outside the spinlock */
		sa11x0_dma_desc_free(d, &head);
		ret = 0;
		break;

	case DMA_PAUSE:
		dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
		spin_lock_irqsave(&c->lock, flags);
		if (c->status == DMA_IN_PROGRESS) {
			c->status = DMA_PAUSED;

			p = c->phy;
			if (p) {
				/* Running on hardware: stop the engine */
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			} else {
				/* Not yet scheduled: drop off the pending list */
				spin_lock(&d->lock);
				list_del_init(&c->node);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->lock, flags);
		ret = 0;
		break;

	case DMA_RESUME:
		dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
		spin_lock_irqsave(&c->lock, flags);
		if (c->status == DMA_PAUSED) {
			c->status = DMA_IN_PROGRESS;

			p = c->phy;
			if (p) {
				/* Restart the hardware where it left off */
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
			} else if (!list_empty(&c->desc_issued)) {
				/* Re-queue for a pchan if work is waiting */
				spin_lock(&d->lock);
				list_add_tail(&c->node, &d->chan_pending);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->lock, flags);
		ret = 0;
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
/*
 * Static description of one virtual DMA channel: the DDAR device-select
 * and direction bits it programs, plus the name clients match against
 * via sa11x0_dma_filter_fn().
 */
struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

/* CD(d1, d2): entry for DDAR device d1 with extra DDAR flags d2;
 * the channel name is the stringised device identifier. */
#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};
/*
 * Populate @dmadev with one virtual channel per chan_desc[] entry and
 * register it with the dmaengine core.  Returns 0 or a negative errno;
 * on failure the caller is expected to unwind already-added channels
 * with sa11x0_dma_free_channels().
 */
static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	dmadev->chancnt = ARRAY_SIZE(chan_desc);
	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_control = sa11x0_dma_control;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < dmadev->chancnt; i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->chan.device = dmadev;
		/* No transfer in flight; this is the driver's idle state */
		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		spin_lock_init(&c->lock);
		INIT_LIST_HEAD(&c->desc_submitted);
		INIT_LIST_HEAD(&c->desc_issued);
		INIT_LIST_HEAD(&c->node);
		list_add_tail(&c->chan.device_node, &dmadev->channels);
	}

	return dma_async_device_register(dmadev);
}

/*
 * Request the interrupt for physical channel @nr, with @data as the
 * handler cookie.  Returns -ENXIO if the platform device does not
 * provide that IRQ, otherwise the request_irq() result.
 */
static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}
883*6365beadSRussell King { 884*6365beadSRussell King int irq = platform_get_irq(pdev, nr); 885*6365beadSRussell King if (irq > 0) 886*6365beadSRussell King free_irq(irq, data); 887*6365beadSRussell King } 888*6365beadSRussell King 889*6365beadSRussell King static void sa11x0_dma_free_channels(struct dma_device *dmadev) 890*6365beadSRussell King { 891*6365beadSRussell King struct sa11x0_dma_chan *c, *cn; 892*6365beadSRussell King 893*6365beadSRussell King list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) { 894*6365beadSRussell King list_del(&c->chan.device_node); 895*6365beadSRussell King kfree(c); 896*6365beadSRussell King } 897*6365beadSRussell King } 898*6365beadSRussell King 899*6365beadSRussell King static int __devinit sa11x0_dma_probe(struct platform_device *pdev) 900*6365beadSRussell King { 901*6365beadSRussell King struct sa11x0_dma_dev *d; 902*6365beadSRussell King struct resource *res; 903*6365beadSRussell King unsigned i; 904*6365beadSRussell King int ret; 905*6365beadSRussell King 906*6365beadSRussell King res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 907*6365beadSRussell King if (!res) 908*6365beadSRussell King return -ENXIO; 909*6365beadSRussell King 910*6365beadSRussell King d = kzalloc(sizeof(*d), GFP_KERNEL); 911*6365beadSRussell King if (!d) { 912*6365beadSRussell King ret = -ENOMEM; 913*6365beadSRussell King goto err_alloc; 914*6365beadSRussell King } 915*6365beadSRussell King 916*6365beadSRussell King spin_lock_init(&d->lock); 917*6365beadSRussell King INIT_LIST_HEAD(&d->chan_pending); 918*6365beadSRussell King INIT_LIST_HEAD(&d->desc_complete); 919*6365beadSRussell King 920*6365beadSRussell King d->base = ioremap(res->start, resource_size(res)); 921*6365beadSRussell King if (!d->base) { 922*6365beadSRussell King ret = -ENOMEM; 923*6365beadSRussell King goto err_ioremap; 924*6365beadSRussell King } 925*6365beadSRussell King 926*6365beadSRussell King tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d); 
/*
 * Probe: map the DMA controller registers, reset and claim the IRQ of
 * each of the NR_PHY_CHAN physical channels, then register the slave
 * dma_device.  Error handling unwinds in strict reverse order via the
 * goto ladder at the bottom.
 */
static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	INIT_LIST_HEAD(&d->desc_complete);

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		/* Quiesce the channel: clear run/irq-enable and all status */
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			/* Release the IRQs claimed so far, newest first */
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
err_ioremap:
	kfree(d);
err_alloc:
	return ret;
}

/*
 * Remove: unregister from the dmaengine core, then release channels,
 * IRQs, the tasklet and the register mapping in reverse-probe order.
 */
static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}
/*
 * Suspend: stop each physical channel and save its state into the
 * sa11x0_dma_phy structure so sa11x0_dma_resume() can reprogram it.
 * The save is normalised so that dbs[0]/dbt[0] always describe the
 * transfer to restore into buffer set A.
 */
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			/* Stop the channel, then re-read the settled status */
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			/*
			 * Buffer B registers are in use: swap the A/B
			 * register sets and start bits in the saved state
			 * so resume always loads set A first.
			 */
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}

/*
 * Resume: for each physical channel that still has a descriptor in
 * flight, reprogram DDAR, both buffer sets and the saved control bits.
 * Channels with no pending descriptor are left quiescent.
 */
static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		/* The channel should be idle coming out of suspend */
		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}
/* Use the noirq phase for every PM transition so the channel state is
 * saved/restored after client drivers have quiesced their transfers. */
static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.owner	= THIS_MODULE,
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= __devexit_p(sa11x0_dma_remove),
};
1085*6365beadSRussell King struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 1086*6365beadSRussell King const char *p = param; 1087*6365beadSRussell King 1088*6365beadSRussell King return !strcmp(c->name, p); 1089*6365beadSRussell King } 1090*6365beadSRussell King return false; 1091*6365beadSRussell King } 1092*6365beadSRussell King EXPORT_SYMBOL(sa11x0_dma_filter_fn); 1093*6365beadSRussell King 1094*6365beadSRussell King static int __init sa11x0_dma_init(void) 1095*6365beadSRussell King { 1096*6365beadSRussell King return platform_driver_register(&sa11x0_dma_driver); 1097*6365beadSRussell King } 1098*6365beadSRussell King subsys_initcall(sa11x0_dma_init); 1099*6365beadSRussell King 1100*6365beadSRussell King static void __exit sa11x0_dma_exit(void) 1101*6365beadSRussell King { 1102*6365beadSRussell King platform_driver_unregister(&sa11x0_dma_driver); 1103*6365beadSRussell King } 1104*6365beadSRussell King module_exit(sa11x0_dma_exit); 1105*6365beadSRussell King 1106*6365beadSRussell King MODULE_AUTHOR("Russell King"); 1107*6365beadSRussell King MODULE_DESCRIPTION("SA-11x0 DMA driver"); 1108*6365beadSRussell King MODULE_LICENSE("GPL v2"); 1109*6365beadSRussell King MODULE_ALIAS("platform:sa11x0-dma"); 1110