/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright 2011 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>

#include "tsi721.h"

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}

static inline
struct tsi721_tx_desc *tsi721_dma_first_active(
				struct tsi721_bdma_chan *bdma_chan)
{
	return list_first_entry(&bdma_chan->active_list,
				struct tsi721_tx_desc, desc_node);
}

static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64 *sts_ptr;
	dma_addr_t bd_phys;
	dma_addr_t sts_phys;
	int sts_size;
	int bd_num = bdma_chan->bd_num;

	dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);

	/* Allocate space for DMA descriptors */
	bd_ptr = dma_zalloc_coherent(dev,
				bd_num * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);
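	/*
	 * The descriptor status FIFO allocated below must be able to hold
	 * a completion entry for every descriptor in the ring: it is sized
	 * to at least bd_num entries (never less than TSI721_DMA_MINSTSSZ)
	 * and rounded up to a power of two, as expected by the
	 * TSI721_DMAC_DSSZ_SIZE() encoding used further down.
	 */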
	/* Allocate space for descriptor status FIFO */
	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
					bd_num : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				&sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				bd_num * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	dev_dbg(dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);

	/* Initialize DMA descriptors ring */
	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}

static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;

	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}
}
static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	tsi721_bdma_handler(bdma_chan);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start DMA with no BDs ready\n");
		return;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"tx_chan: %p, chan: %d, regs: %p\n",
		bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}

static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
			    struct tsi721_tx_desc *desc)
{
	dev_dbg(bdma_chan->dchan.device->dev,
		"Put desc: %p into free list\n", desc);

	if (desc) {
		spin_lock_bh(&bdma_chan->lock);
		list_splice_init(&desc->tx_list, &bdma_chan->free_list);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->wr_count_next = bdma_chan->wr_count;
		spin_unlock_bh(&bdma_chan->lock);
	}
}

static
struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_tx_desc *tx_desc, *_tx_desc;
	struct tsi721_tx_desc *ret = NULL;
	int i;

	spin_lock_bh(&bdma_chan->lock);
	list_for_each_entry_safe(tx_desc, _tx_desc,
				 &bdma_chan->free_list, desc_node) {
		if (async_tx_test_ack(&tx_desc->txd)) {
			list_del(&tx_desc->desc_node);
			ret = tx_desc;
			break;
		}
		dev_dbg(bdma_chan->dchan.device->dev,
			"desc %p not ACKed\n", tx_desc);
	}

	/* Assign a hardware descriptor only if a free slot was found */
	if (ret) {
		i = bdma_chan->wr_count_next % bdma_chan->bd_num;
		if (i == bdma_chan->bd_num - 1) {
			i = 0;
			bdma_chan->wr_count_next++; /* skip link descriptor */
		}

		bdma_chan->wr_count_next++;
		ret->txd.phys = bdma_chan->bd_phys +
					i * sizeof(struct tsi721_dma_desc);
		ret->hw_desc =
			&((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
	}

	spin_unlock_bh(&bdma_chan->lock);

	return ret;
}
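/*
 * Ring layout assumed by tsi721_desc_get() above: the hardware ring holds
 * bd_num descriptors, the last of which is a DTYPE3 "link" descriptor that
 * points back to the ring base (see tsi721_bdma_ch_init()).  Only bd_num - 1
 * entries carry data, so wr_count_next skips the link slot when it wraps.
 * For example, with bd_num = 32 the data descriptors occupy indices 0..30
 * and index 31 is the link back to index 0.
 */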
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct scatterlist *sg,
	enum dma_rtype rtype, u32 sys_size)
{
	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
	u64 rio_addr;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
					(rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
					(sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
					(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

static int
tsi721_desc_fill_end(struct tsi721_tx_desc *desc)
{
	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;

	/* Update DMA descriptor */
	if (desc->interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(desc->bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}

static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
				      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &bdma_chan->free_list);
	list_move(&desc->desc_node, &bdma_chan->free_list);
	bdma_chan->completed_cookie = txd->cookie;

	if (callback)
		callback(param);
}

static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!tsi721_dma_is_idle(bdma_chan));

	if (!list_empty(&bdma_chan->queue))
		tsi721_start_dma(bdma_chan);

	list_splice_init(&bdma_chan->active_list, &list);
	list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_chain_complete(bdma_chan, desc);
}

static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}

static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
{
	if (list_empty(&bdma_chan->active_list) ||
		list_is_singular(&bdma_chan->active_list)) {
		dev_dbg(bdma_chan->dchan.device->dev,
			"%s: Active_list empty\n", __func__);
		tsi721_dma_complete_all(bdma_chan);
	} else {
		dev_dbg(bdma_chan->dchan.device->dev,
			"%s: Active_list NOT empty\n", __func__);
		tsi721_dma_chain_complete(bdma_chan,
					tsi721_dma_first_active(bdma_chan));
		tsi721_start_dma(bdma_chan);
	}
}
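/*
 * Deferred interrupt handling: tsi721_bdma_handler() masks the channel
 * interrupts and schedules the tasklet below.  The tasklet acknowledges the
 * pending bits, drains the descriptor status FIFO, advances queued work
 * under the channel lock and finally re-enables the channel interrupts.
 */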
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
		__func__, bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
			__func__, bdma_chan->id, dmac_sts);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMAC%d descriptor status FIFO is full\n",
			__func__, bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		tsi721_advance_work(bdma_chan);
		spin_unlock(&bdma_chan->lock);
	}

	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&bdma_chan->lock);

	cookie = txd->chan->cookie;
	if (++cookie < 0)
		cookie = 1;
	txd->chan->cookie = cookie;
	txd->cookie = cookie;

	if (list_empty(&bdma_chan->active_list)) {
		list_add_tail(&desc->desc_node, &bdma_chan->active_list);
		tsi721_start_dma(bdma_chan);
	} else {
		list_add_tail(&desc->desc_node, &bdma_chan->queue);
	}

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(dchan->device);
#endif
	struct tsi721_tx_desc *desc = NULL;
	LIST_HEAD(tmp_list);
	int i;
	int rc;

	if (bdma_chan->bd_base)
		return bdma_chan->bd_num - 1;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan)) {
		dev_err(dchan->device->dev, "Unable to initialize data DMA"
			" channel %d, aborting\n", bdma_chan->id);
		return -ENOMEM;
	}

	/* Allocate matching number of logical descriptors */
	desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
			GFP_KERNEL);
	if (!desc) {
		dev_err(dchan->device->dev,
			"Failed to allocate logical descriptors\n");
		rc = -ENOMEM;
		goto err_out;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < bdma_chan->bd_num - 1; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		INIT_LIST_HEAD(&desc[i].tx_list);
		list_add_tail(&desc[i].desc_node, &tmp_list);
	}

	spin_lock_bh(&bdma_chan->lock);
	list_splice(&tmp_list, &bdma_chan->free_list);
	bdma_chan->completed_cookie = dchan->cookie = 1;
	spin_unlock_bh(&bdma_chan->lock);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(
			priv->msix[TSI721_VECT_DMA0_DONE +
				   bdma_chan->id].vector,
			tsi721_bdma_msix, 0,
			priv->msix[TSI721_VECT_DMA0_DONE +
				   bdma_chan->id].irq_name,
			(void *)bdma_chan);

		if (rc) {
			dev_dbg(dchan->device->dev,
				"Unable to allocate MSI-X interrupt for "
				"BDMA%d-DONE\n", bdma_chan->id);
			goto err_out;
		}

		rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
					    bdma_chan->id].vector,
				tsi721_bdma_msix, 0,
				priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].irq_name,
				(void *)bdma_chan);

		if (rc) {
			dev_dbg(dchan->device->dev,
				"Unable to allocate MSI-X interrupt for "
				"BDMA%d-INT\n", bdma_chan->id);
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector,
				(void *)bdma_chan);
			rc = -EIO;
			goto err_out;
		}
	}
#endif /* CONFIG_PCI_MSI */

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return bdma_chan->bd_num - 1;

err_out:
	kfree(desc);
	tsi721_bdma_ch_free(bdma_chan);
	return rc;
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_device *priv = to_tsi721(dchan->device);
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (bdma_chan->bd_base == NULL)
		return;

	BUG_ON(!list_empty(&bdma_chan->active_list));
	BUG_ON(!list_empty(&bdma_chan->queue));

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);

	tasklet_kill(&bdma_chan->tasklet);

	spin_lock_bh(&bdma_chan->lock);
	list_splice_init(&bdma_chan->free_list, &list);
	spin_unlock_bh(&bdma_chan->lock);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_bdma_ch_free(bdma_chan);
	kfree(bdma_chan->tx_desc);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_completed;
	int ret;

	spin_lock_bh(&bdma_chan->lock);
	last_completed = bdma_chan->completed_cookie;
	last_used = dchan->cookie;
	spin_unlock_bh(&bdma_chan->lock);

	ret = dma_async_is_complete(cookie, last_completed, last_used);

	dma_set_tx_state(txstate, last_completed, last_used, 0);

	dev_dbg(dchan->device->dev,
		"%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
		__func__, ret, last_completed, last_used);

	return ret;
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (tsi721_dma_is_idle(bdma_chan)) {
		spin_lock_bh(&bdma_chan->lock);
		tsi721_advance_work(bdma_chan);
		spin_unlock_bh(&bdma_chan->lock);
	} else
		dev_dbg(dchan->device->dev,
			"%s: DMA channel still busy\n", __func__);
}
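/*
 * Usage sketch for the prep routine below (illustrative only; destid,
 * rio_base, sgl, sg_len and dchan stand for values the caller already owns,
 * e.g. a channel obtained through dma_request_channel(); error handling is
 * omitted).  A RapidIO client describes the remote target with a
 * struct rio_dma_ext and follows the usual dmaengine submit/issue sequence:
 *
 *	struct rio_dma_ext rext = {
 *		.destid   = destid,		// RapidIO destination ID
 *		.rio_addr = rio_base,		// starting RapidIO address
 *		.wr_type  = RDW_ALL_NWRITE_R,	// write type for DMA_MEM_TO_DEV
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dchan->device->device_prep_slave_sg(dchan, sgl, sg_len,
 *						   DMA_MEM_TO_DEV,
 *						   DMA_PREP_INTERRUPT, &rext);
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(dchan);
 *
 * Completion can then be checked through dma_async_is_tx_complete(), which
 * ends up in tsi721_tx_status().
 */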
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	struct tsi721_tx_desc *first = NULL;
	struct scatterlist *sg;
	struct rio_dma_ext *rext = tinfo;
	u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
	unsigned int i;
	u32 sys_size = dma_to_mport(dchan->device)->sys_size;
	enum dma_rtype rtype;
	dma_addr_t next_addr = -1;

	if (!sgl || !sg_len) {
		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		dev_err(dchan->device->dev,
			"%s: Unsupported DMA direction option\n", __func__);
		return NULL;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		int err;

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			dev_err(dchan->device->dev,
				"%s: SG entry %d is too large\n", __func__, i);
			goto err_desc_put;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (desc) {
			if (next_addr == sg_dma_address(sg) &&
			    desc->bcount + sg_dma_len(sg) <=
						TSI721_BDMA_MAX_BCOUNT) {
				/* Adjust byte count of the descriptor */
				desc->bcount += sg_dma_len(sg);
				goto entry_done;
			}

			/*
			 * Finalize this descriptor using total
			 * byte count value.
			 */
			tsi721_desc_fill_end(desc);
			dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
				__func__, desc->bcount);
		}

		/*
		 * Obtain and initialize a new descriptor
		 */
		desc = tsi721_desc_get(bdma_chan);
		if (!desc) {
			dev_err(dchan->device->dev,
				"%s: Failed to get new descriptor for SG %d\n",
				__func__, i);
			goto err_desc_put;
		}

		desc->destid = rext->destid;
		desc->rio_addr = rio_addr;
		desc->rio_addr_u = 0;
		desc->bcount = sg_dma_len(sg);

		dev_dbg(dchan->device->dev,
			"sg%d desc: 0x%llx, addr: 0x%llx len: %d\n",
			i, (u64)desc->txd.phys,
			(unsigned long long)sg_dma_address(sg),
			sg_dma_len(sg));

		dev_dbg(dchan->device->dev,
			"bd_ptr = %p did=%d raddr=0x%llx\n",
			desc->hw_desc, desc->destid, desc->rio_addr);

		err = tsi721_desc_fill_init(desc, sg, rtype, sys_size);
		if (err) {
			dev_err(dchan->device->dev,
				"Failed to build desc: %d\n", err);
			goto err_desc_put;
		}

		next_addr = sg_dma_address(sg);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->desc_node, &first->tx_list);

entry_done:
		if (sg_is_last(sg)) {
			desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
			tsi721_desc_fill_end(desc);
			dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
				__func__, desc->bcount);
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_put:
	tsi721_desc_put(bdma_chan, first);
	return NULL;
}

static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
				 unsigned long arg)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&bdma_chan->lock);

	/* make sure to stop the transfer */
	iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);

	list_splice_init(&bdma_chan->active_list, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_chain_complete(bdma_chan, desc);
	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}

int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = TSI721_DMA_MAXCH;
	int err;
	struct rio_mport *mport = priv->mport;

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.chancnt = nr_channels;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if (i == TSI721_DMACH_MAINT)
			continue;

		bdma_chan->bd_num = TSI721_BDMA_BD_RING_SZ;
		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		INIT_LIST_HEAD(&bdma_chan->active_list);
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
	}

	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_control = tsi721_device_control;

	err = dma_async_device_register(&mport->dma);
	if (err)
		dev_err(&priv->pdev->dev, "Failed to register DMA device\n");

	return err;
}
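/*
 * Note: tsi721_register_dma() is expected to be called once from the bridge
 * probe path after the mport structure has been set up.  The channel used
 * for maintenance transactions (TSI721_DMACH_MAINT) is intentionally skipped
 * above and is not exposed through the dmaengine framework.
 */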