1 /* 2 * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. 3 * Copyright (C) Semihalf 2009 4 * Copyright (C) Ilya Yanok, Emcraft Systems 2010 5 * Copyright (C) Alexander Popov, Promcontroller 2014 6 * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016 7 * 8 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description 9 * (defines, structures and comments) was taken from MPC5121 DMA driver 10 * written by Hongjun Chen <hong-jun.chen@freescale.com>. 11 * 12 * Approved as OSADL project by a majority of OSADL members and funded 13 * by OSADL membership fees in 2009; for details see www.osadl.org. 14 * 15 * This program is free software; you can redistribute it and/or modify it 16 * under the terms of the GNU General Public License as published by the Free 17 * Software Foundation; either version 2 of the License, or (at your option) 18 * any later version. 19 * 20 * This program is distributed in the hope that it will be useful, but WITHOUT 21 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 22 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 23 * more details. 24 * 25 * The full GNU General Public License is included in this distribution in the 26 * file called COPYING. 27 */ 28 29 /* 30 * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers 31 * (tested using dmatest module) and data transfers between memory and 32 * peripheral I/O memory by means of slave scatter/gather with these 33 * limitations: 34 * - chunked transfers (described by s/g lists with more than one item) are 35 * refused as long as proper support for scatter/gather is missing 36 * - transfers on MPC8308 always start from software as this SoC does not have 37 * external request lines for peripheral flow control 38 * - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for 39 * MPC512x), and 32 bytes are supported, and, consequently, source 40 * addresses and destination addresses must be aligned accordingly; 41 * furthermore, for MPC512x SoCs, the transfer size must be aligned on 42 * (chunk size * maxburst) 43 */ 44 45 #include <linux/module.h> 46 #include <linux/dmaengine.h> 47 #include <linux/dma-mapping.h> 48 #include <linux/interrupt.h> 49 #include <linux/io.h> 50 #include <linux/slab.h> 51 #include <linux/of_address.h> 52 #include <linux/of_device.h> 53 #include <linux/of_irq.h> 54 #include <linux/of_dma.h> 55 #include <linux/of_platform.h> 56 57 #include <linux/random.h> 58 59 #include "dmaengine.h" 60 61 /* Number of DMA Transfer descriptors allocated per channel */ 62 #define MPC_DMA_DESCRIPTORS 64 63 64 /* Macro definitions */ 65 #define MPC_DMA_TCD_OFFSET 0x1000 66 67 /* 68 * Maximum channel counts for individual hardware variants 69 * and the maximum channel count over all supported controllers, 70 * used for data structure size 71 */ 72 #define MPC8308_DMACHAN_MAX 16 73 #define MPC512x_DMACHAN_MAX 64 74 #define MPC_DMA_CHANNELS 64 75 76 /* Arbitration mode of group and channel */ 77 #define MPC_DMA_DMACR_EDCG (1 << 31) 78 #define MPC_DMA_DMACR_ERGA (1 << 3) 79 #define MPC_DMA_DMACR_ERCA (1 << 2) 80 81 /* Error codes */ 82 #define MPC_DMA_DMAES_VLD (1 << 31) 83 #define MPC_DMA_DMAES_GPE (1 << 15) 84 #define MPC_DMA_DMAES_CPE (1 << 14) 85 #define MPC_DMA_DMAES_ERRCHN(err) \ 86 (((err) >> 8) & 0x3f) 87 #define MPC_DMA_DMAES_SAE (1 << 7) 88 #define MPC_DMA_DMAES_SOE (1 << 6) 89 #define MPC_DMA_DMAES_DAE (1 << 5) 90 #define MPC_DMA_DMAES_DOE (1 << 4) 91 #define MPC_DMA_DMAES_NCE (1 << 3) 92 #define MPC_DMA_DMAES_SGE (1 << 2) 93 #define MPC_DMA_DMAES_SBE (1 << 1) 94 #define MPC_DMA_DMAES_DBE (1 << 0) 95 96 #define MPC_DMA_DMAGPOR_SNOOP_ENABLE (1 << 6) 97 98 #define MPC_DMA_TSIZE_1 0x00 99 #define MPC_DMA_TSIZE_2 0x01 100 #define MPC_DMA_TSIZE_4 0x02 101 #define MPC_DMA_TSIZE_16 0x04 102 #define MPC_DMA_TSIZE_32 0x05 103 104 /* MPC5121 DMA engine registers */ 105 struct __attribute__ ((__packed__)) mpc_dma_regs { 106 /* 0x00 */ 107 u32 dmacr; /* DMA control register */ 108 u32 dmaes; /* DMA error status */ 109 /* 0x08 */ 110 u32 dmaerqh; /* DMA enable request high(channels 63~32) */ 111 u32 dmaerql; /* DMA enable request low(channels 31~0) */ 112 u32 dmaeeih; /* DMA enable error interrupt high(ch63~32) */ 113 u32 dmaeeil; /* DMA enable error interrupt low(ch31~0) */ 114 /* 0x18 */ 115 u8 dmaserq; /* DMA set enable request */ 116 u8 dmacerq; /* DMA clear enable request */ 117 u8 dmaseei; /* DMA set enable error interrupt */ 118 u8 dmaceei; /* DMA clear enable error interrupt */ 119 /* 0x1c */ 120 u8 dmacint; /* DMA clear interrupt request */ 121 u8 dmacerr; /* DMA clear error */ 122 u8 dmassrt; /* DMA set start bit */ 123 u8 dmacdne; /* DMA clear DONE status bit */ 124 /* 0x20 */ 125 u32 dmainth; /* DMA interrupt request high(ch63~32) */ 126 u32 dmaintl; /* DMA interrupt request low(ch31~0) */ 127 u32 dmaerrh; /* DMA error high(ch63~32) */ 128 u32 dmaerrl; /* DMA error low(ch31~0) */ 129 /* 0x30 */ 130 u32 dmahrsh; /* DMA hw request status high(ch63~32) */ 131 u32 dmahrsl; /* DMA hardware request status low(ch31~0) */ 132 union { 133 u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */ 134 u32 dmagpor; /* (General purpose register on MPC8308) */ 135 }; 136 u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */ 137 /* 0x40 ~ 0xff */ 138 u32 reserve0[48]; /* Reserved */ 139 /* 0x100 */ 140 u8 dchpri[MPC_DMA_CHANNELS]; 141 /* DMA channels(0~63) priority */ 142 }; 143 144 struct __attribute__ ((__packed__)) mpc_dma_tcd { 145 /* 0x00 */ 146 u32 saddr; /* Source address */ 147 148 u32 smod:5; /* Source address modulo */ 149 u32 ssize:3; /* Source data transfer size */ 150 u32 dmod:5; /* Destination address modulo */ 151 u32 dsize:3; /* Destination data transfer size */ 152 u32 soff:16; /* Signed source address offset */ 153 154 /* 0x08 */ 155 u32 nbytes; /* Inner "minor" byte count */ 156 u32 slast; /* Last source address adjustment */ 157 u32 daddr; /* Destination address */ 158 159 /* 0x14 */ 160 u32 citer_elink:1; /* Enable channel-to-channel linking on 161 * minor loop complete 162 */ 163 u32 citer_linkch:6; /* Link channel for minor loop complete */ 164 u32 citer:9; /* Current "major" iteration count */ 165 u32 doff:16; /* Signed destination address offset */ 166 167 /* 0x18 */ 168 u32 dlast_sga; /* Last Destination address adjustment/scatter 169 * gather address 170 */ 171 172 /* 0x1c */ 173 u32 biter_elink:1; /* Enable channel-to-channel linking on major 174 * loop complete 175 */ 176 u32 biter_linkch:6; 177 u32 biter:9; /* Beginning "major" iteration count */ 178 u32 bwc:2; /* Bandwidth control */ 179 u32 major_linkch:6; /* Link channel number */ 180 u32 done:1; /* Channel done */ 181 u32 active:1; /* Channel active */ 182 u32 major_elink:1; /* Enable channel-to-channel linking on major 183 * loop complete 184 */ 185 u32 e_sg:1; /* Enable scatter/gather processing */ 186 u32 d_req:1; /* Disable request */ 187 u32 int_half:1; /* Enable an interrupt when major counter is 188 * half complete 189 */ 190 u32 int_maj:1; /* Enable an interrupt when major iteration 191 * count completes 192 */ 193 u32 start:1; /* Channel start */ 194 }; 195 196 struct mpc_dma_desc { 197 struct dma_async_tx_descriptor desc; 198 struct mpc_dma_tcd *tcd; 199 dma_addr_t tcd_paddr; 200 int error; 201 struct list_head node; 202 int will_access_peripheral; 203 }; 204 205 struct mpc_dma_chan { 206 struct dma_chan chan; 207 struct list_head free; 208 struct list_head prepared; 209 struct list_head queued; 210 struct list_head active; 211 struct list_head completed; 212 struct mpc_dma_tcd *tcd; 213 dma_addr_t tcd_paddr; 214 215 /* Settings for access to peripheral FIFO */ 216 dma_addr_t src_per_paddr; 217 u32 src_tcd_nunits; 218 u8 swidth; 219 dma_addr_t dst_per_paddr; 220 u32 dst_tcd_nunits; 221 u8 dwidth; 222 223 /* Lock for this structure */ 224 spinlock_t lock; 225 }; 226 227 struct mpc_dma { 228 struct dma_device dma; 229 struct tasklet_struct tasklet; 230 struct mpc_dma_chan channels[MPC_DMA_CHANNELS]; 231 struct mpc_dma_regs __iomem *regs; 232 struct mpc_dma_tcd __iomem *tcd; 233 int irq; 234 int irq2; 235 uint error_status; 236 int is_mpc8308; 237 238 /* Lock for error_status field in this structure */ 239 spinlock_t error_status_lock; 240 }; 241 242 #define DRV_NAME "mpc512x_dma" 243 244 /* Convert struct dma_chan to struct mpc_dma_chan */ 245 static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c) 246 { 247 return container_of(c, struct mpc_dma_chan, chan); 248 } 249 250 /* Convert struct dma_chan to struct mpc_dma */ 251 static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) 252 { 253 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c); 254 return container_of(mchan, struct mpc_dma, channels[c->chan_id]); 255 } 256 257 /* 258 * Execute all queued DMA descriptors. 259 * 260 * Following requirements must be met while calling mpc_dma_execute(): 261 * a) mchan->lock is acquired, 262 * b) mchan->active list is empty, 263 * c) mchan->queued list contains at least one entry. 264 */ 265 static void mpc_dma_execute(struct mpc_dma_chan *mchan) 266 { 267 struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan); 268 struct mpc_dma_desc *first = NULL; 269 struct mpc_dma_desc *prev = NULL; 270 struct mpc_dma_desc *mdesc; 271 int cid = mchan->chan.chan_id; 272 273 while (!list_empty(&mchan->queued)) { 274 mdesc = list_first_entry(&mchan->queued, 275 struct mpc_dma_desc, node); 276 /* 277 * Grab either several mem-to-mem transfer descriptors 278 * or one peripheral transfer descriptor, 279 * don't mix mem-to-mem and peripheral transfer descriptors 280 * within the same 'active' list. 281 */ 282 if (mdesc->will_access_peripheral) { 283 if (list_empty(&mchan->active)) 284 list_move_tail(&mdesc->node, &mchan->active); 285 break; 286 } else { 287 list_move_tail(&mdesc->node, &mchan->active); 288 } 289 } 290 291 /* Chain descriptors into one transaction */ 292 list_for_each_entry(mdesc, &mchan->active, node) { 293 if (!first) 294 first = mdesc; 295 296 if (!prev) { 297 prev = mdesc; 298 continue; 299 } 300 301 prev->tcd->dlast_sga = mdesc->tcd_paddr; 302 prev->tcd->e_sg = 1; 303 mdesc->tcd->start = 1; 304 305 prev = mdesc; 306 } 307 308 prev->tcd->int_maj = 1; 309 310 /* Send first descriptor in chain into hardware */ 311 memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd)); 312 313 if (first != prev) 314 mdma->tcd[cid].e_sg = 1; 315 316 if (mdma->is_mpc8308) { 317 /* MPC8308, no request lines, software initiated start */ 318 out_8(&mdma->regs->dmassrt, cid); 319 } else if (first->will_access_peripheral) { 320 /* Peripherals involved, start by external request signal */ 321 out_8(&mdma->regs->dmaserq, cid); 322 } else { 323 /* Memory to memory transfer, software initiated start */ 324 out_8(&mdma->regs->dmassrt, cid); 325 } 326 } 327 328 /* Handle interrupt on one half of DMA controller (32 channels) */ 329 static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off) 330 { 331 struct mpc_dma_chan *mchan; 332 struct mpc_dma_desc *mdesc; 333 u32 status = is | es; 334 int ch; 335 336 while ((ch = fls(status) - 1) >= 0) { 337 status &= ~(1 << ch); 338 mchan = &mdma->channels[ch + off]; 339 340 spin_lock(&mchan->lock); 341 342 out_8(&mdma->regs->dmacint, ch + off); 343 out_8(&mdma->regs->dmacerr, ch + off); 344 345 /* Check error status */ 346 if (es & (1 << ch)) 347 list_for_each_entry(mdesc, &mchan->active, node) 348 mdesc->error = -EIO; 349 350 /* Execute queued descriptors */ 351 list_splice_tail_init(&mchan->active, &mchan->completed); 352 if (!list_empty(&mchan->queued)) 353 mpc_dma_execute(mchan); 354 355 spin_unlock(&mchan->lock); 356 } 357 } 358 359 /* Interrupt handler */ 360 static irqreturn_t mpc_dma_irq(int irq, void *data) 361 { 362 struct mpc_dma *mdma = data; 363 uint es; 364 365 /* Save error status register */ 366 es = in_be32(&mdma->regs->dmaes); 367 spin_lock(&mdma->error_status_lock); 368 if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0) 369 mdma->error_status = es; 370 spin_unlock(&mdma->error_status_lock); 371 372 /* Handle interrupt on each channel */ 373 if (mdma->dma.chancnt > 32) { 374 mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth), 375 in_be32(&mdma->regs->dmaerrh), 32); 376 } 377 mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl), 378 in_be32(&mdma->regs->dmaerrl), 0); 379 380 /* Schedule tasklet */ 381 tasklet_schedule(&mdma->tasklet); 382 383 return IRQ_HANDLED; 384 } 385 386 /* process completed descriptors */ 387 static void mpc_dma_process_completed(struct mpc_dma *mdma) 388 { 389 dma_cookie_t last_cookie = 0; 390 struct mpc_dma_chan *mchan; 391 struct mpc_dma_desc *mdesc; 392 struct dma_async_tx_descriptor *desc; 393 unsigned long flags; 394 LIST_HEAD(list); 395 int i; 396 397 for (i = 0; i < mdma->dma.chancnt; i++) { 398 mchan = &mdma->channels[i]; 399 400 /* Get all completed descriptors */ 401 spin_lock_irqsave(&mchan->lock, flags); 402 if (!list_empty(&mchan->completed)) 403 list_splice_tail_init(&mchan->completed, &list); 404 spin_unlock_irqrestore(&mchan->lock, flags); 405 406 if (list_empty(&list)) 407 continue; 408 409 /* Execute callbacks and run dependencies */ 410 list_for_each_entry(mdesc, &list, node) { 411 desc = &mdesc->desc; 412 413 if (desc->callback) 414 desc->callback(desc->callback_param); 415 416 last_cookie = desc->cookie; 417 dma_run_dependencies(desc); 418 } 419 420 /* Free descriptors */ 421 spin_lock_irqsave(&mchan->lock, flags); 422 list_splice_tail_init(&list, &mchan->free); 423 mchan->chan.completed_cookie = last_cookie; 424 spin_unlock_irqrestore(&mchan->lock, flags); 425 } 426 } 427 428 /* DMA Tasklet */ 429 static void mpc_dma_tasklet(unsigned long data) 430 { 431 struct mpc_dma *mdma = (void *)data; 432 unsigned long flags; 433 uint es; 434 435 spin_lock_irqsave(&mdma->error_status_lock, flags); 436 es = mdma->error_status; 437 mdma->error_status = 0; 438 spin_unlock_irqrestore(&mdma->error_status_lock, flags); 439 440 /* Print nice error report */ 441 if (es) { 442 dev_err(mdma->dma.dev, 443 "Hardware reported following error(s) on channel %u:\n", 444 MPC_DMA_DMAES_ERRCHN(es)); 445 446 if (es & MPC_DMA_DMAES_GPE) 447 dev_err(mdma->dma.dev, "- Group Priority Error\n"); 448 if (es & MPC_DMA_DMAES_CPE) 449 dev_err(mdma->dma.dev, "- Channel Priority Error\n"); 450 if (es & MPC_DMA_DMAES_SAE) 451 dev_err(mdma->dma.dev, "- Source Address Error\n"); 452 if (es & MPC_DMA_DMAES_SOE) 453 dev_err(mdma->dma.dev, "- Source Offset" 454 " Configuration Error\n"); 455 if (es & MPC_DMA_DMAES_DAE) 456 dev_err(mdma->dma.dev, "- Destination Address" 457 " Error\n"); 458 if (es & MPC_DMA_DMAES_DOE) 459 dev_err(mdma->dma.dev, "- Destination Offset" 460 " Configuration Error\n"); 461 if (es & MPC_DMA_DMAES_NCE) 462 dev_err(mdma->dma.dev, "- NBytes/Citter" 463 " Configuration Error\n"); 464 if (es & MPC_DMA_DMAES_SGE) 465 dev_err(mdma->dma.dev, "- Scatter/Gather" 466 " Configuration Error\n"); 467 if (es & MPC_DMA_DMAES_SBE) 468 dev_err(mdma->dma.dev, "- Source Bus Error\n"); 469 if (es & MPC_DMA_DMAES_DBE) 470 dev_err(mdma->dma.dev, "- Destination Bus Error\n"); 471 } 472 473 mpc_dma_process_completed(mdma); 474 } 475 476 /* Submit descriptor to hardware */ 477 static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd) 478 { 479 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan); 480 struct mpc_dma_desc *mdesc; 481 unsigned long flags; 482 dma_cookie_t cookie; 483 484 mdesc = container_of(txd, struct mpc_dma_desc, desc); 485 486 spin_lock_irqsave(&mchan->lock, flags); 487 488 /* Move descriptor to queue */ 489 list_move_tail(&mdesc->node, &mchan->queued); 490 491 /* If channel is idle, execute all queued descriptors */ 492 if (list_empty(&mchan->active)) 493 mpc_dma_execute(mchan); 494 495 /* Update cookie */ 496 cookie = dma_cookie_assign(txd); 497 spin_unlock_irqrestore(&mchan->lock, flags); 498 499 return cookie; 500 } 501 502 /* Alloc channel resources */ 503 static int mpc_dma_alloc_chan_resources(struct dma_chan *chan) 504 { 505 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); 506 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 507 struct mpc_dma_desc *mdesc; 508 struct mpc_dma_tcd *tcd; 509 dma_addr_t tcd_paddr; 510 unsigned long flags; 511 LIST_HEAD(descs); 512 int i; 513 514 /* Alloc DMA memory for Transfer Control Descriptors */ 515 tcd = dma_alloc_coherent(mdma->dma.dev, 516 MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), 517 &tcd_paddr, GFP_KERNEL); 518 if (!tcd) 519 return -ENOMEM; 520 521 /* Alloc descriptors for this channel */ 522 for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) { 523 mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL); 524 if (!mdesc) { 525 dev_notice(mdma->dma.dev, "Memory allocation error. " 526 "Allocated only %u descriptors\n", i); 527 break; 528 } 529 530 dma_async_tx_descriptor_init(&mdesc->desc, chan); 531 mdesc->desc.flags = DMA_CTRL_ACK; 532 mdesc->desc.tx_submit = mpc_dma_tx_submit; 533 534 mdesc->tcd = &tcd[i]; 535 mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd)); 536 537 list_add_tail(&mdesc->node, &descs); 538 } 539 540 /* Return error only if no descriptors were allocated */ 541 if (i == 0) { 542 dma_free_coherent(mdma->dma.dev, 543 MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), 544 tcd, tcd_paddr); 545 return -ENOMEM; 546 } 547 548 spin_lock_irqsave(&mchan->lock, flags); 549 mchan->tcd = tcd; 550 mchan->tcd_paddr = tcd_paddr; 551 list_splice_tail_init(&descs, &mchan->free); 552 spin_unlock_irqrestore(&mchan->lock, flags); 553 554 /* Enable Error Interrupt */ 555 out_8(&mdma->regs->dmaseei, chan->chan_id); 556 557 return 0; 558 } 559 560 /* Free channel resources */ 561 static void mpc_dma_free_chan_resources(struct dma_chan *chan) 562 { 563 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); 564 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 565 struct mpc_dma_desc *mdesc, *tmp; 566 struct mpc_dma_tcd *tcd; 567 dma_addr_t tcd_paddr; 568 unsigned long flags; 569 LIST_HEAD(descs); 570 571 spin_lock_irqsave(&mchan->lock, flags); 572 573 /* Channel must be idle */ 574 BUG_ON(!list_empty(&mchan->prepared)); 575 BUG_ON(!list_empty(&mchan->queued)); 576 BUG_ON(!list_empty(&mchan->active)); 577 BUG_ON(!list_empty(&mchan->completed)); 578 579 /* Move data */ 580 list_splice_tail_init(&mchan->free, &descs); 581 tcd = mchan->tcd; 582 tcd_paddr = mchan->tcd_paddr; 583 584 spin_unlock_irqrestore(&mchan->lock, flags); 585 586 /* Free DMA memory used by descriptors */ 587 dma_free_coherent(mdma->dma.dev, 588 MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), 589 tcd, tcd_paddr); 590 591 /* Free descriptors */ 592 list_for_each_entry_safe(mdesc, tmp, &descs, node) 593 kfree(mdesc); 594 595 /* Disable Error Interrupt */ 596 out_8(&mdma->regs->dmaceei, chan->chan_id); 597 } 598 599 /* Send all pending descriptor to hardware */ 600 static void mpc_dma_issue_pending(struct dma_chan *chan) 601 { 602 /* 603 * We are posting descriptors to the hardware as soon as 604 * they are ready, so this function does nothing. 605 */ 606 } 607 608 /* Check request completion status */ 609 static enum dma_status 610 mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 611 struct dma_tx_state *txstate) 612 { 613 return dma_cookie_status(chan, cookie, txstate); 614 } 615 616 /* Prepare descriptor for memory to memory copy */ 617 static struct dma_async_tx_descriptor * 618 mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, 619 size_t len, unsigned long flags) 620 { 621 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); 622 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 623 struct mpc_dma_desc *mdesc = NULL; 624 struct mpc_dma_tcd *tcd; 625 unsigned long iflags; 626 627 /* Get free descriptor */ 628 spin_lock_irqsave(&mchan->lock, iflags); 629 if (!list_empty(&mchan->free)) { 630 mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc, 631 node); 632 list_del(&mdesc->node); 633 } 634 spin_unlock_irqrestore(&mchan->lock, iflags); 635 636 if (!mdesc) { 637 /* try to free completed descriptors */ 638 mpc_dma_process_completed(mdma); 639 return NULL; 640 } 641 642 mdesc->error = 0; 643 mdesc->will_access_peripheral = 0; 644 tcd = mdesc->tcd; 645 646 /* Prepare Transfer Control Descriptor for this transaction */ 647 memset(tcd, 0, sizeof(struct mpc_dma_tcd)); 648 649 if (IS_ALIGNED(src | dst | len, 32)) { 650 tcd->ssize = MPC_DMA_TSIZE_32; 651 tcd->dsize = MPC_DMA_TSIZE_32; 652 tcd->soff = 32; 653 tcd->doff = 32; 654 } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) { 655 /* MPC8308 doesn't support 16 byte transfers */ 656 tcd->ssize = MPC_DMA_TSIZE_16; 657 tcd->dsize = MPC_DMA_TSIZE_16; 658 tcd->soff = 16; 659 tcd->doff = 16; 660 } else if (IS_ALIGNED(src | dst | len, 4)) { 661 tcd->ssize = MPC_DMA_TSIZE_4; 662 tcd->dsize = MPC_DMA_TSIZE_4; 663 tcd->soff = 4; 664 tcd->doff = 4; 665 } else if (IS_ALIGNED(src | dst | len, 2)) { 666 tcd->ssize = MPC_DMA_TSIZE_2; 667 tcd->dsize = MPC_DMA_TSIZE_2; 668 tcd->soff = 2; 669 tcd->doff = 2; 670 } else { 671 tcd->ssize = MPC_DMA_TSIZE_1; 672 tcd->dsize = MPC_DMA_TSIZE_1; 673 tcd->soff = 1; 674 tcd->doff = 1; 675 } 676 677 tcd->saddr = src; 678 tcd->daddr = dst; 679 tcd->nbytes = len; 680 tcd->biter = 1; 681 tcd->citer = 1; 682 683 /* Place descriptor in prepared list */ 684 spin_lock_irqsave(&mchan->lock, iflags); 685 list_add_tail(&mdesc->node, &mchan->prepared); 686 spin_unlock_irqrestore(&mchan->lock, iflags); 687 688 return &mdesc->desc; 689 } 690 691 inline u8 buswidth_to_dmatsize(u8 buswidth) 692 { 693 u8 res; 694 695 for (res = 0; buswidth > 1; buswidth /= 2) 696 res++; 697 return res; 698 } 699 700 static struct dma_async_tx_descriptor * 701 mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 702 unsigned int sg_len, enum dma_transfer_direction direction, 703 unsigned long flags, void *context) 704 { 705 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); 706 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 707 struct mpc_dma_desc *mdesc = NULL; 708 dma_addr_t per_paddr; 709 u32 tcd_nunits; 710 struct mpc_dma_tcd *tcd; 711 unsigned long iflags; 712 struct scatterlist *sg; 713 size_t len; 714 int iter, i; 715 716 /* Currently there is no proper support for scatter/gather */ 717 if (sg_len != 1) 718 return NULL; 719 720 if (!is_slave_direction(direction)) 721 return NULL; 722 723 for_each_sg(sgl, sg, sg_len, i) { 724 spin_lock_irqsave(&mchan->lock, iflags); 725 726 mdesc = list_first_entry(&mchan->free, 727 struct mpc_dma_desc, node); 728 if (!mdesc) { 729 spin_unlock_irqrestore(&mchan->lock, iflags); 730 /* Try to free completed descriptors */ 731 mpc_dma_process_completed(mdma); 732 return NULL; 733 } 734 735 list_del(&mdesc->node); 736 737 if (direction == DMA_DEV_TO_MEM) { 738 per_paddr = mchan->src_per_paddr; 739 tcd_nunits = mchan->src_tcd_nunits; 740 } else { 741 per_paddr = mchan->dst_per_paddr; 742 tcd_nunits = mchan->dst_tcd_nunits; 743 } 744 745 spin_unlock_irqrestore(&mchan->lock, iflags); 746 747 if (per_paddr == 0 || tcd_nunits == 0) 748 goto err_prep; 749 750 mdesc->error = 0; 751 mdesc->will_access_peripheral = 1; 752 753 /* Prepare Transfer Control Descriptor for this transaction */ 754 tcd = mdesc->tcd; 755 756 memset(tcd, 0, sizeof(struct mpc_dma_tcd)); 757 758 if (direction == DMA_DEV_TO_MEM) { 759 tcd->saddr = per_paddr; 760 tcd->daddr = sg_dma_address(sg); 761 762 if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth)) 763 goto err_prep; 764 765 tcd->soff = 0; 766 tcd->doff = mchan->dwidth; 767 } else { 768 tcd->saddr = sg_dma_address(sg); 769 tcd->daddr = per_paddr; 770 771 if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth)) 772 goto err_prep; 773 774 tcd->soff = mchan->swidth; 775 tcd->doff = 0; 776 } 777 778 tcd->ssize = buswidth_to_dmatsize(mchan->swidth); 779 tcd->dsize = buswidth_to_dmatsize(mchan->dwidth); 780 781 if (mdma->is_mpc8308) { 782 tcd->nbytes = sg_dma_len(sg); 783 if (!IS_ALIGNED(tcd->nbytes, mchan->swidth)) 784 goto err_prep; 785 786 /* No major loops for MPC8303 */ 787 tcd->biter = 1; 788 tcd->citer = 1; 789 } else { 790 len = sg_dma_len(sg); 791 tcd->nbytes = tcd_nunits * tcd->ssize; 792 if (!IS_ALIGNED(len, tcd->nbytes)) 793 goto err_prep; 794 795 iter = len / tcd->nbytes; 796 if (iter >= 1 << 15) { 797 /* len is too big */ 798 goto err_prep; 799 } 800 /* citer_linkch contains the high bits of iter */ 801 tcd->biter = iter & 0x1ff; 802 tcd->biter_linkch = iter >> 9; 803 tcd->citer = tcd->biter; 804 tcd->citer_linkch = tcd->biter_linkch; 805 } 806 807 tcd->e_sg = 0; 808 tcd->d_req = 1; 809 810 /* Place descriptor in prepared list */ 811 spin_lock_irqsave(&mchan->lock, iflags); 812 list_add_tail(&mdesc->node, &mchan->prepared); 813 spin_unlock_irqrestore(&mchan->lock, iflags); 814 } 815 816 return &mdesc->desc; 817 818 err_prep: 819 /* Put the descriptor back */ 820 spin_lock_irqsave(&mchan->lock, iflags); 821 list_add_tail(&mdesc->node, &mchan->free); 822 spin_unlock_irqrestore(&mchan->lock, iflags); 823 824 return NULL; 825 } 826 827 inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308) 828 { 829 switch (buswidth) { 830 case 16: 831 if (is_mpc8308) 832 return false; 833 case 1: 834 case 2: 835 case 4: 836 case 32: 837 break; 838 default: 839 return false; 840 } 841 842 return true; 843 } 844 845 static int mpc_dma_device_config(struct dma_chan *chan, 846 struct dma_slave_config *cfg) 847 { 848 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 849 struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan); 850 unsigned long flags; 851 852 /* 853 * Software constraints: 854 * - only transfers between a peripheral device and memory are 855 * supported 856 * - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes 857 * are supported, and, consequently, source addresses and 858 * destination addresses; must be aligned accordingly; furthermore, 859 * for MPC512x SoCs, the transfer size must be aligned on (chunk 860 * size * maxburst) 861 * - during the transfer, the RAM address is incremented by the size 862 * of transfer chunk 863 * - the peripheral port's address is constant during the transfer. 864 */ 865 866 if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) || 867 !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) { 868 return -EINVAL; 869 } 870 871 if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) || 872 !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308)) 873 return -EINVAL; 874 875 spin_lock_irqsave(&mchan->lock, flags); 876 877 mchan->src_per_paddr = cfg->src_addr; 878 mchan->src_tcd_nunits = cfg->src_maxburst; 879 mchan->swidth = cfg->src_addr_width; 880 mchan->dst_per_paddr = cfg->dst_addr; 881 mchan->dst_tcd_nunits = cfg->dst_maxburst; 882 mchan->dwidth = cfg->dst_addr_width; 883 884 /* Apply defaults */ 885 if (mchan->src_tcd_nunits == 0) 886 mchan->src_tcd_nunits = 1; 887 if (mchan->dst_tcd_nunits == 0) 888 mchan->dst_tcd_nunits = 1; 889 890 spin_unlock_irqrestore(&mchan->lock, flags); 891 892 return 0; 893 } 894 895 static int mpc_dma_device_terminate_all(struct dma_chan *chan) 896 { 897 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 898 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); 899 unsigned long flags; 900 901 /* Disable channel requests */ 902 spin_lock_irqsave(&mchan->lock, flags); 903 904 out_8(&mdma->regs->dmacerq, chan->chan_id); 905 list_splice_tail_init(&mchan->prepared, &mchan->free); 906 list_splice_tail_init(&mchan->queued, &mchan->free); 907 list_splice_tail_init(&mchan->active, &mchan->free); 908 909 spin_unlock_irqrestore(&mchan->lock, flags); 910 911 return 0; 912 } 913 914 static int mpc_dma_probe(struct platform_device *op) 915 { 916 struct device_node *dn = op->dev.of_node; 917 struct device *dev = &op->dev; 918 struct dma_device *dma; 919 struct mpc_dma *mdma; 920 struct mpc_dma_chan *mchan; 921 struct resource res; 922 ulong regs_start, regs_size; 923 int retval, i; 924 u8 chancnt; 925 926 mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); 927 if (!mdma) { 928 dev_err(dev, "Memory exhausted!\n"); 929 retval = -ENOMEM; 930 goto err; 931 } 932 933 mdma->irq = irq_of_parse_and_map(dn, 0); 934 if (mdma->irq == NO_IRQ) { 935 dev_err(dev, "Error mapping IRQ!\n"); 936 retval = -EINVAL; 937 goto err; 938 } 939 940 if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) { 941 mdma->is_mpc8308 = 1; 942 mdma->irq2 = irq_of_parse_and_map(dn, 1); 943 if (mdma->irq2 == NO_IRQ) { 944 dev_err(dev, "Error mapping IRQ!\n"); 945 retval = -EINVAL; 946 goto err_dispose1; 947 } 948 } 949 950 retval = of_address_to_resource(dn, 0, &res); 951 if (retval) { 952 dev_err(dev, "Error parsing memory region!\n"); 953 goto err_dispose2; 954 } 955 956 regs_start = res.start; 957 regs_size = resource_size(&res); 958 959 if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { 960 dev_err(dev, "Error requesting memory region!\n"); 961 retval = -EBUSY; 962 goto err_dispose2; 963 } 964 965 mdma->regs = devm_ioremap(dev, regs_start, regs_size); 966 if (!mdma->regs) { 967 dev_err(dev, "Error mapping memory region!\n"); 968 retval = -ENOMEM; 969 goto err_dispose2; 970 } 971 972 mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs) 973 + MPC_DMA_TCD_OFFSET); 974 975 retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma); 976 if (retval) { 977 dev_err(dev, "Error requesting IRQ!\n"); 978 retval = -EINVAL; 979 goto err_dispose2; 980 } 981 982 if (mdma->is_mpc8308) { 983 retval = request_irq(mdma->irq2, &mpc_dma_irq, 0, 984 DRV_NAME, mdma); 985 if (retval) { 986 dev_err(dev, "Error requesting IRQ2!\n"); 987 retval = -EINVAL; 988 goto err_free1; 989 } 990 } 991 992 spin_lock_init(&mdma->error_status_lock); 993 994 dma = &mdma->dma; 995 dma->dev = dev; 996 dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; 997 dma->device_free_chan_resources = mpc_dma_free_chan_resources; 998 dma->device_issue_pending = mpc_dma_issue_pending; 999 dma->device_tx_status = mpc_dma_tx_status; 1000 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; 1001 dma->device_prep_slave_sg = mpc_dma_prep_slave_sg; 1002 dma->device_config = mpc_dma_device_config; 1003 dma->device_terminate_all = mpc_dma_device_terminate_all; 1004 1005 INIT_LIST_HEAD(&dma->channels); 1006 dma_cap_set(DMA_MEMCPY, dma->cap_mask); 1007 dma_cap_set(DMA_SLAVE, dma->cap_mask); 1008 1009 if (mdma->is_mpc8308) 1010 chancnt = MPC8308_DMACHAN_MAX; 1011 else 1012 chancnt = MPC512x_DMACHAN_MAX; 1013 1014 for (i = 0; i < chancnt; i++) { 1015 mchan = &mdma->channels[i]; 1016 1017 mchan->chan.device = dma; 1018 dma_cookie_init(&mchan->chan); 1019 1020 INIT_LIST_HEAD(&mchan->free); 1021 INIT_LIST_HEAD(&mchan->prepared); 1022 INIT_LIST_HEAD(&mchan->queued); 1023 INIT_LIST_HEAD(&mchan->active); 1024 INIT_LIST_HEAD(&mchan->completed); 1025 1026 spin_lock_init(&mchan->lock); 1027 list_add_tail(&mchan->chan.device_node, &dma->channels); 1028 } 1029 1030 tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma); 1031 1032 /* 1033 * Configure DMA Engine: 1034 * - Dynamic clock, 1035 * - Round-robin group arbitration, 1036 * - Round-robin channel arbitration. 1037 */ 1038 if (mdma->is_mpc8308) { 1039 /* MPC8308 has 16 channels and lacks some registers */ 1040 out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA); 1041 1042 /* enable snooping */ 1043 out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE); 1044 /* Disable error interrupts */ 1045 out_be32(&mdma->regs->dmaeeil, 0); 1046 1047 /* Clear interrupts status */ 1048 out_be32(&mdma->regs->dmaintl, 0xFFFF); 1049 out_be32(&mdma->regs->dmaerrl, 0xFFFF); 1050 } else { 1051 out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | 1052 MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); 1053 1054 /* Disable hardware DMA requests */ 1055 out_be32(&mdma->regs->dmaerqh, 0); 1056 out_be32(&mdma->regs->dmaerql, 0); 1057 1058 /* Disable error interrupts */ 1059 out_be32(&mdma->regs->dmaeeih, 0); 1060 out_be32(&mdma->regs->dmaeeil, 0); 1061 1062 /* Clear interrupts status */ 1063 out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); 1064 out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); 1065 out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); 1066 out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); 1067 1068 /* Route interrupts to IPIC */ 1069 out_be32(&mdma->regs->dmaihsa, 0); 1070 out_be32(&mdma->regs->dmailsa, 0); 1071 } 1072 1073 /* Register DMA engine */ 1074 dev_set_drvdata(dev, mdma); 1075 retval = dma_async_device_register(dma); 1076 if (retval) 1077 goto err_free2; 1078 1079 /* Register with OF helpers for DMA lookups (nonfatal) */ 1080 if (dev->of_node) { 1081 retval = of_dma_controller_register(dev->of_node, 1082 of_dma_xlate_by_chan_id, mdma); 1083 if (retval) 1084 dev_warn(dev, "Could not register for OF lookup\n"); 1085 } 1086 1087 return 0; 1088 1089 err_free2: 1090 if (mdma->is_mpc8308) 1091 free_irq(mdma->irq2, mdma); 1092 err_free1: 1093 free_irq(mdma->irq, mdma); 1094 err_dispose2: 1095 if (mdma->is_mpc8308) 1096 irq_dispose_mapping(mdma->irq2); 1097 err_dispose1: 1098 irq_dispose_mapping(mdma->irq); 1099 err: 1100 return retval; 1101 } 1102 1103 static int mpc_dma_remove(struct platform_device *op) 1104 { 1105 struct device *dev = &op->dev; 1106 struct mpc_dma *mdma = dev_get_drvdata(dev); 1107 1108 if (dev->of_node) 1109 of_dma_controller_free(dev->of_node); 1110 dma_async_device_unregister(&mdma->dma); 1111 if (mdma->is_mpc8308) { 1112 free_irq(mdma->irq2, mdma); 1113 irq_dispose_mapping(mdma->irq2); 1114 } 1115 free_irq(mdma->irq, mdma); 1116 irq_dispose_mapping(mdma->irq); 1117 1118 return 0; 1119 } 1120 1121 static const struct of_device_id mpc_dma_match[] = { 1122 { .compatible = "fsl,mpc5121-dma", }, 1123 { .compatible = "fsl,mpc8308-dma", }, 1124 {}, 1125 }; 1126 MODULE_DEVICE_TABLE(of, mpc_dma_match); 1127 1128 static struct platform_driver mpc_dma_driver = { 1129 .probe = mpc_dma_probe, 1130 .remove = mpc_dma_remove, 1131 .driver = { 1132 .name = DRV_NAME, 1133 .of_match_table = mpc_dma_match, 1134 }, 1135 }; 1136 1137 module_platform_driver(mpc_dma_driver); 1138 1139 MODULE_LICENSE("GPL"); 1140 MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); 1141