/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory
 * transfers are supported (tested using the dmatest module).
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_CHANNELS	64
#define MPC_DMA_TCD_OFFSET	0x1000

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high (channels 63~32) */
	u32 dmaerql;		/* DMA enable request low (channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high (ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low (ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high (ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low (ch31~0) */
	u32 dmaerrh;		/* DMA error high (ch63~32) */
	u32 dmaerrl;		/* DMA error low (ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high (ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low (ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE (ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE (ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels (0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Link channel for major loop complete */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};

struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
};

struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	/* Move all queued descriptors to active list */
	list_splice_tail_init(&mchan->queued, &mchan->active);

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		/* Link the previous TCD to this one via scatter/gather */
		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	/* Interrupt only after the last descriptor in the chain completes */
	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;
	out_8(&mdma->regs->dmassrt, cid);
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported following error(s) on channel %u:\n",
			MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address"
						" Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev, "Memory allocation error. "
					"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
551 */ 552 } 553 554 /* Check request completion status */ 555 static enum dma_status 556 mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 557 struct dma_tx_state *txstate) 558 { 559 return dma_cookie_status(chan, cookie, txstate); 560 } 561 562 /* Prepare descriptor for memory to memory copy */ 563 static struct dma_async_tx_descriptor * 564 mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, 565 size_t len, unsigned long flags) 566 { 567 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); 568 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 569 struct mpc_dma_desc *mdesc = NULL; 570 struct mpc_dma_tcd *tcd; 571 unsigned long iflags; 572 573 /* Get free descriptor */ 574 spin_lock_irqsave(&mchan->lock, iflags); 575 if (!list_empty(&mchan->free)) { 576 mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc, 577 node); 578 list_del(&mdesc->node); 579 } 580 spin_unlock_irqrestore(&mchan->lock, iflags); 581 582 if (!mdesc) { 583 /* try to free completed descriptors */ 584 mpc_dma_process_completed(mdma); 585 return NULL; 586 } 587 588 mdesc->error = 0; 589 tcd = mdesc->tcd; 590 591 /* Prepare Transfer Control Descriptor for this transaction */ 592 memset(tcd, 0, sizeof(struct mpc_dma_tcd)); 593 594 if (IS_ALIGNED(src | dst | len, 32)) { 595 tcd->ssize = MPC_DMA_TSIZE_32; 596 tcd->dsize = MPC_DMA_TSIZE_32; 597 tcd->soff = 32; 598 tcd->doff = 32; 599 } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) { 600 /* MPC8308 doesn't support 16 byte transfers */ 601 tcd->ssize = MPC_DMA_TSIZE_16; 602 tcd->dsize = MPC_DMA_TSIZE_16; 603 tcd->soff = 16; 604 tcd->doff = 16; 605 } else if (IS_ALIGNED(src | dst | len, 4)) { 606 tcd->ssize = MPC_DMA_TSIZE_4; 607 tcd->dsize = MPC_DMA_TSIZE_4; 608 tcd->soff = 4; 609 tcd->doff = 4; 610 } else if (IS_ALIGNED(src | dst | len, 2)) { 611 tcd->ssize = MPC_DMA_TSIZE_2; 612 tcd->dsize = MPC_DMA_TSIZE_2; 613 tcd->soff = 2; 614 tcd->doff = 2; 615 } else { 616 tcd->ssize = MPC_DMA_TSIZE_1; 617 tcd->dsize = MPC_DMA_TSIZE_1; 618 tcd->soff = 1; 619 tcd->doff = 1; 620 } 621 622 tcd->saddr = src; 623 tcd->daddr = dst; 624 tcd->nbytes = len; 625 tcd->biter = 1; 626 tcd->citer = 1; 627 628 /* Place descriptor in prepared list */ 629 spin_lock_irqsave(&mchan->lock, iflags); 630 list_add_tail(&mdesc->node, &mchan->prepared); 631 spin_unlock_irqrestore(&mchan->lock, iflags); 632 633 return &mdesc->desc; 634 } 635 636 static int mpc_dma_probe(struct platform_device *op) 637 { 638 struct device_node *dn = op->dev.of_node; 639 struct device *dev = &op->dev; 640 struct dma_device *dma; 641 struct mpc_dma *mdma; 642 struct mpc_dma_chan *mchan; 643 struct resource res; 644 ulong regs_start, regs_size; 645 int retval, i; 646 647 mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); 648 if (!mdma) { 649 dev_err(dev, "Memory exhausted!\n"); 650 return -ENOMEM; 651 } 652 653 mdma->irq = irq_of_parse_and_map(dn, 0); 654 if (mdma->irq == NO_IRQ) { 655 dev_err(dev, "Error mapping IRQ!\n"); 656 return -EINVAL; 657 } 658 659 if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) { 660 mdma->is_mpc8308 = 1; 661 mdma->irq2 = irq_of_parse_and_map(dn, 1); 662 if (mdma->irq2 == NO_IRQ) { 663 dev_err(dev, "Error mapping IRQ!\n"); 664 return -EINVAL; 665 } 666 } 667 668 retval = of_address_to_resource(dn, 0, &res); 669 if (retval) { 670 dev_err(dev, "Error parsing memory region!\n"); 671 return retval; 672 } 673 674 regs_start = res.start; 675 regs_size = resource_size(&res); 676 677 if (!devm_request_mem_region(dev, regs_start, 
					regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		return -EBUSY;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		return -ENOMEM;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
									mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		return -EINVAL;
	}

	if (mdma->is_mpc8308) {
		retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
				DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			return -EINVAL;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	if (!mdma->is_mpc8308)
		dma->chancnt = MPC_DMA_CHANNELS;
	else
		dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (!mdma->is_mpc8308) {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
					MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	} else {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval) {
		devm_free_irq(dev, mdma->irq, mdma);
		irq_dispose_mapping(mdma->irq);
	}

	return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&mdma->dma);
	devm_free_irq(dev, mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}

static struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{},
};

static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= mpc_dma_remove,
	.driver = {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
		.of_match_table	= mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");