/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

static unsigned int dma_txqueue_sz = 16;
module_param(dma_txqueue_sz, uint, S_IRUGO);
MODULE_PARM_DESC(dma_txqueue_sz,
		 "DMA Transactions Queue Size (default: 16)");

static u8 dma_sel = 0x7f;
module_param(dma_sel, byte, S_IRUGO);
MODULE_PARM_DESC(dma_sel,
		 "DMA Channel Selection Mask (default: 0x7f = all)");

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}

static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64		*sts_ptr;
	dma_addr_t	bd_phys;
	dma_addr_t	sts_phys;
	int		sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_zalloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_ATOMIC);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d descriptors @ %p (phys = %pad)",
		  bdma_chan->id, bd_ptr, &bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				      sts_size * sizeof(struct tsi721_dma_sts),
				      &sts_phys, GFP_ATOMIC);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
		  bdma_chan->id, sts_ptr, &sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
					     TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		  bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		  bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		  bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		  bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		  bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		  bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-DONE",
				  bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-INT",
				  bdma_chan->id);
			free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				 (void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}

static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INT);
	}
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
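 *
 * Returns: %IRQ_HANDLED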
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start non-idle channel",
			bdma_chan->id);
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start DMA with no BDs ready %d",
			bdma_chan->id, task_pid_nr(current));
		return;
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
		  bdma_chan->id, bdma_chan->wr_count_next,
		  task_pid_nr(current));

	iowrite32(bdma_chan->wr_count_next,
		  bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}

static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (bd_ptr == NULL)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
				(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (bd_ptr == NULL)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}

static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}

static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}

/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;
	struct device *ch_dev = &dchan->dev->device;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
			bdma_chan->id);
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(dchan->device)->sys_size;

	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
		  bdma_chan->id, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
			bdma_chan->id, i, desc->sg_len,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
				bdma_chan->id, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
				  bdma_chan->id, bcount);
		}

		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d HW descriptor ring is full @ %d",
				  bdma_chan->id, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			tsi_err(ch_dev, "Failed to build desc: err=%d", err);
			break;
		}

		tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
			  bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d last desc final len: %d",
				  bdma_chan->id, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}

static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
				struct tsi721_tx_desc *desc)
{
	int err;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	if (!tsi721_dma_is_idle(bdma_chan))
		return;

	/*
	 * If there is no data transfer in progress, fetch new descriptor from
	 * the pending queue.
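	 * Note: the caller must hold the channel spinlock.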
	 */

	if (desc == NULL && bdma_chan->active_tx == NULL &&
					!list_empty(&bdma_chan->queue)) {
		desc = list_first_entry(&bdma_chan->queue,
					struct tsi721_tx_desc, desc_node);
		list_del_init(&desc->desc_node);
		bdma_chan->active_tx = desc;
	}

	if (desc) {
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				"DMAC%d ERR: tsi721_submit_sg failed with err=%d",
				bdma_chan->id, err);
		}
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
		  bdma_chan->id);
}

static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
		  bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		int i = 10000;
		struct tsi721_tx_desc *desc;

		desc = bdma_chan->active_tx;
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
			bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);

		/* Re-initialize DMA channel if possible */

		if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
			goto err_out;

		tsi721_clr_stat(bdma_chan);

		spin_lock(&bdma_chan->lock);

		/* Put DMA channel into init state */
		iowrite32(TSI721_DMAC_CTL_INIT,
			  bdma_chan->regs + TSI721_DMAC_CTL);
		do {
			udelay(1);
			dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
			i--;
		} while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);

		if (dmac_sts & TSI721_DMAC_STS_ABORT) {
			tsi_err(&bdma_chan->dchan.dev->device,
				"Failed to re-initiate DMAC%d", bdma_chan->id);
			spin_unlock(&bdma_chan->lock);
			goto err_out;
		}

		/* Setup DMA descriptor pointers */
		iowrite32(((u64)bdma_chan->bd_phys >> 32),
			  bdma_chan->regs + TSI721_DMAC_DPTRH);
		iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
			  bdma_chan->regs + TSI721_DMAC_DPTRL);

		/* Setup descriptor status FIFO */
		iowrite32(((u64)bdma_chan->sts_phys >> 32),
			  bdma_chan->regs + TSI721_DMAC_DSBH);
		iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
			  bdma_chan->regs + TSI721_DMAC_DSBL);
		iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
			  bdma_chan->regs + TSI721_DMAC_DSSZ);

		/* Clear interrupt bits */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INT);

		ioread32(bdma_chan->regs + TSI721_DMAC_INT);

		bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
		bdma_chan->sts_rdptr = 0;
		udelay(10);

		desc = bdma_chan->active_tx;
		desc->status = DMA_ERROR;
		dma_cookie_complete(&desc->txd);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->active_tx = NULL;
		if (bdma_chan->active)
			tsi721_advance_work(bdma_chan, NULL);
		spin_unlock(&bdma_chan->lock);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d descriptor status FIFO is full",
			bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
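		/* Complete the transaction or submit its remaining part */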
		spin_lock(&bdma_chan->lock);
		desc = bdma_chan->active_tx;

		if (desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_add(&desc->desc_node, &bdma_chan->free_list);
			bdma_chan->active_tx = NULL;
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan, NULL);
			spin_unlock(&bdma_chan->lock);
			if (callback)
				callback(param);
		} else {
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan,
						    bdma_chan->active_tx);
			spin_unlock(&bdma_chan->lock);
		}
	}
err_out:
	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d wrong state of descriptor %p",
			bdma_chan->id, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);
	tsi721_advance_work(bdma_chan, NULL);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	int i;

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (bdma_chan->bd_base)
		return dma_txqueue_sz;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
		       GFP_ATOMIC);
	if (!desc) {
		tsi_err(&dchan->dev->device,
			"DMAC%d Failed to allocate logical descriptors",
			bdma_chan->id);
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < dma_txqueue_sz; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return dma_txqueue_sz;
}

static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (bdma_chan->bd_base == NULL)
		return;

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	enum dma_status status;

	spin_lock_bh(&bdma_chan->lock);
	status = dma_cookie_status(dchan, cookie, txstate);
	spin_unlock_bh(&bdma_chan->lock);
	return status;
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);
	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
		tsi721_advance_work(bdma_chan, NULL);
	}
	spin_unlock_bh(&bdma_chan->lock);
}

static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc;
	struct rio_dma_ext *rext = tinfo;
	enum dma_rtype rtype;
	struct dma_async_tx_descriptor *txd = NULL;

	if (!sgl || !sg_len) {
		tsi_err(&dchan->dev->device, "DMAC%d No SG list",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
		  (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		tsi_err(&dchan->dev->device,
			"DMAC%d Unsupported DMA direction option",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!list_empty(&bdma_chan->free_list)) {
		desc = list_first_entry(&bdma_chan->free_list,
					struct tsi721_tx_desc, desc_node);
		list_del_init(&desc->desc_node);
		desc->destid = rext->destid;
		desc->rio_addr = rext->rio_addr;
		desc->rio_addr_u = 0;
		desc->rtype = rtype;
		desc->sg_len = sg_len;
		desc->sg = sgl;
		txd = &desc->txd;
		txd->flags = flags;
	}

	spin_unlock_bh(&bdma_chan->lock);

	if (!txd) {
		tsi_debug(DMA, &dchan->dev->device,
			  "DMAC%d free TXD is not available", bdma_chan->id);
		return ERR_PTR(-EBUSY);
	}

	return txd;
}

static int tsi721_terminate_all(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);

	bdma_chan->active = false;

	while (!tsi721_dma_is_idle(bdma_chan)) {

		udelay(5);
#if (0)
		/* make sure to stop the transfer */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		do {
			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
#endif
	}

	if (bdma_chan->active_tx)
		list_add(&bdma_chan->active_tx->desc_node, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_tx_err(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}

static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
{
	if (!bdma_chan->active)
		return;
	spin_lock_bh(&bdma_chan->lock);
	if (!tsi721_dma_is_idle(bdma_chan)) {
		int timeout = 100000;

		/* stop the transfer in progress */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
			udelay(1);
	}

	spin_unlock_bh(&bdma_chan->lock);
}

void tsi721_dma_stop_all(struct tsi721_device *priv)
{
	int i;

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
			tsi721_dma_stop(&priv->bdma[i]);
	}
}

int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = &priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		bdma_chan->active_tx = NULL;
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_terminate_all = tsi721_terminate_all;

	err = dma_async_device_register(&mport->dma);
	if (err)
		tsi_err(&priv->pdev->dev, "Failed to register DMA device");

	return err;
}

void tsi721_unregister_dma(struct tsi721_device *priv)
{
	struct rio_mport *mport = &priv->mport;
	struct dma_chan *chan, *_c;
	struct tsi721_bdma_chan *bdma_chan;

	tsi721_dma_stop_all(priv);
	dma_async_device_unregister(&mport->dma);

	list_for_each_entry_safe(chan, _c, &mport->dma.channels,
				 device_node) {
		bdma_chan = to_tsi721_chan(chan);
		if (bdma_chan->active) {
			tsi721_bdma_interrupt_enable(bdma_chan, 0);
			bdma_chan->active = false;
			tsi721_sync_dma_irq(bdma_chan);
			tasklet_kill(&bdma_chan->tasklet);
			INIT_LIST_HEAD(&bdma_chan->free_list);
			kfree(bdma_chan->tx_desc);
			tsi721_bdma_ch_free(bdma_chan);
		}

		list_del(&chan->device_node);
	}
}