1 /* 2 * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface 3 * 4 * Copyright (C) 2006 Nokia Corporation 5 * Tony Lindgren <tony@atomide.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include <linux/module.h> 12 #include <linux/kernel.h> 13 #include <linux/errno.h> 14 #include <linux/init.h> 15 #include <linux/usb.h> 16 #include <linux/platform_device.h> 17 #include <linux/dma-mapping.h> 18 #include <mach/dma.h> 19 #include <mach/mux.h> 20 21 #include "musb_core.h" 22 23 #define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) 24 25 #define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */ 26 27 struct tusb_omap_dma_ch { 28 struct musb *musb; 29 void __iomem *tbase; 30 unsigned long phys_offset; 31 int epnum; 32 u8 tx; 33 struct musb_hw_ep *hw_ep; 34 35 int ch; 36 s8 dmareq; 37 s8 sync_dev; 38 39 struct tusb_omap_dma *tusb_dma; 40 41 void __iomem *dma_addr; 42 43 u32 len; 44 u16 packet_sz; 45 u16 transfer_packet_sz; 46 u32 transfer_len; 47 u32 completed_len; 48 }; 49 50 struct tusb_omap_dma { 51 struct dma_controller controller; 52 struct musb *musb; 53 void __iomem *tbase; 54 55 int ch; 56 s8 dmareq; 57 s8 sync_dev; 58 unsigned multichannel:1; 59 }; 60 61 static int tusb_omap_dma_start(struct dma_controller *c) 62 { 63 struct tusb_omap_dma *tusb_dma; 64 65 tusb_dma = container_of(c, struct tusb_omap_dma, controller); 66 67 /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ 68 69 return 0; 70 } 71 72 static int tusb_omap_dma_stop(struct dma_controller *c) 73 { 74 struct tusb_omap_dma *tusb_dma; 75 76 tusb_dma = container_of(c, struct tusb_omap_dma, controller); 77 78 /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ 79 80 return 0; 81 } 82 83 /* 84 * Allocate dmareq0 to the current channel unless it's already taken 85 */ 86 static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat) 87 { 88 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); 89 90 if (reg != 0) { 91 DBG(3, "ep%i dmareq0 is busy for ep%i\n", 92 chdat->epnum, reg & 0xf); 93 return -EAGAIN; 94 } 95 96 if (chdat->tx) 97 reg = (1 << 4) | chdat->epnum; 98 else 99 reg = chdat->epnum; 100 101 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); 102 103 return 0; 104 } 105 106 static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat) 107 { 108 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); 109 110 if ((reg & 0xf) != chdat->epnum) { 111 printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n", 112 chdat->epnum, reg & 0xf); 113 return; 114 } 115 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0); 116 } 117 118 /* 119 * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in 120 * musb_gadget.c. 121 */ 122 static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data) 123 { 124 struct dma_channel *channel = (struct dma_channel *)data; 125 struct tusb_omap_dma_ch *chdat = to_chdat(channel); 126 struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; 127 struct musb *musb = chdat->musb; 128 struct musb_hw_ep *hw_ep = chdat->hw_ep; 129 void __iomem *ep_conf = hw_ep->conf; 130 void __iomem *mbase = musb->mregs; 131 unsigned long remaining, flags, pio; 132 int ch; 133 134 spin_lock_irqsave(&musb->lock, flags); 135 136 if (tusb_dma->multichannel) 137 ch = chdat->ch; 138 else 139 ch = tusb_dma->ch; 140 141 if (ch_status != OMAP_DMA_BLOCK_IRQ) 142 printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status); 143 144 DBG(3, "ep%i %s dma callback ch: %i status: %x\n", 145 chdat->epnum, chdat->tx ? "tx" : "rx", 146 ch, ch_status); 147 148 if (chdat->tx) 149 remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET); 150 else 151 remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET); 152 153 remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining); 154 155 /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */ 156 if (unlikely(remaining > chdat->transfer_len)) { 157 DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n", 158 chdat->tx ? "tx" : "rx", chdat->ch, 159 remaining); 160 remaining = 0; 161 } 162 163 channel->actual_len = chdat->transfer_len - remaining; 164 pio = chdat->len - channel->actual_len; 165 166 DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len); 167 168 /* Transfer remaining 1 - 31 bytes */ 169 if (pio > 0 && pio < 32) { 170 u8 *buf; 171 172 DBG(3, "Using PIO for remaining %lu bytes\n", pio); 173 buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len; 174 if (chdat->tx) { 175 dma_cache_maint(phys_to_virt((u32)chdat->dma_addr), 176 chdat->transfer_len, DMA_TO_DEVICE); 177 musb_write_fifo(hw_ep, pio, buf); 178 } else { 179 musb_read_fifo(hw_ep, pio, buf); 180 dma_cache_maint(phys_to_virt((u32)chdat->dma_addr), 181 chdat->transfer_len, DMA_FROM_DEVICE); 182 } 183 channel->actual_len += pio; 184 } 185 186 if (!tusb_dma->multichannel) 187 tusb_omap_free_shared_dmareq(chdat); 188 189 channel->status = MUSB_DMA_STATUS_FREE; 190 191 /* Handle only RX callbacks here. TX callbacks must be handled based 192 * on the TUSB DMA status interrupt. 193 * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback 194 * interrupt for RX and TX. 195 */ 196 if (!chdat->tx) 197 musb_dma_completion(musb, chdat->epnum, chdat->tx); 198 199 /* We must terminate short tx transfers manually by setting TXPKTRDY. 200 * REVISIT: This same problem may occur with other MUSB dma as well. 201 * Easy to test with g_ether by pinging the MUSB board with ping -s54. 202 */ 203 if ((chdat->transfer_len < chdat->packet_sz) 204 || (chdat->transfer_len % chdat->packet_sz != 0)) { 205 u16 csr; 206 207 if (chdat->tx) { 208 DBG(3, "terminating short tx packet\n"); 209 musb_ep_select(mbase, chdat->epnum); 210 csr = musb_readw(hw_ep->regs, MUSB_TXCSR); 211 csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY 212 | MUSB_TXCSR_P_WZC_BITS; 213 musb_writew(hw_ep->regs, MUSB_TXCSR, csr); 214 } 215 } 216 217 spin_unlock_irqrestore(&musb->lock, flags); 218 } 219 220 static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, 221 u8 rndis_mode, dma_addr_t dma_addr, u32 len) 222 { 223 struct tusb_omap_dma_ch *chdat = to_chdat(channel); 224 struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; 225 struct musb *musb = chdat->musb; 226 struct musb_hw_ep *hw_ep = chdat->hw_ep; 227 void __iomem *mbase = musb->mregs; 228 void __iomem *ep_conf = hw_ep->conf; 229 dma_addr_t fifo = hw_ep->fifo_sync; 230 struct omap_dma_channel_params dma_params; 231 u32 dma_remaining; 232 int src_burst, dst_burst; 233 u16 csr; 234 int ch; 235 s8 dmareq; 236 s8 sync_dev; 237 238 if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz)) 239 return false; 240 241 /* 242 * HW issue #10: Async dma will eventually corrupt the XFR_SIZE 243 * register which will cause missed DMA interrupt. We could try to 244 * use a timer for the callback, but it is unsafe as the XFR_SIZE 245 * register is corrupt, and we won't know if the DMA worked. 246 */ 247 if (dma_addr & 0x2) 248 return false; 249 250 /* 251 * Because of HW issue #10, it seems like mixing sync DMA and async 252 * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before 253 * using the channel for DMA. 254 */ 255 if (chdat->tx) 256 dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET); 257 else 258 dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET); 259 260 dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining); 261 if (dma_remaining) { 262 DBG(2, "Busy %s dma ch%i, not using: %08x\n", 263 chdat->tx ? "tx" : "rx", chdat->ch, 264 dma_remaining); 265 return false; 266 } 267 268 chdat->transfer_len = len & ~0x1f; 269 270 if (len < packet_sz) 271 chdat->transfer_packet_sz = chdat->transfer_len; 272 else 273 chdat->transfer_packet_sz = packet_sz; 274 275 if (tusb_dma->multichannel) { 276 ch = chdat->ch; 277 dmareq = chdat->dmareq; 278 sync_dev = chdat->sync_dev; 279 } else { 280 if (tusb_omap_use_shared_dmareq(chdat) != 0) { 281 DBG(3, "could not get dma for ep%i\n", chdat->epnum); 282 return false; 283 } 284 if (tusb_dma->ch < 0) { 285 /* REVISIT: This should get blocked earlier, happens 286 * with MSC ErrorRecoveryTest 287 */ 288 WARN_ON(1); 289 return false; 290 } 291 292 ch = tusb_dma->ch; 293 dmareq = tusb_dma->dmareq; 294 sync_dev = tusb_dma->sync_dev; 295 omap_set_dma_callback(ch, tusb_omap_dma_cb, channel); 296 } 297 298 chdat->packet_sz = packet_sz; 299 chdat->len = len; 300 channel->actual_len = 0; 301 chdat->dma_addr = (void __iomem *)dma_addr; 302 channel->status = MUSB_DMA_STATUS_BUSY; 303 304 /* Since we're recycling dma areas, we need to clean or invalidate */ 305 if (chdat->tx) 306 dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE); 307 else 308 dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE); 309 310 /* Use 16-bit transfer if dma_addr is not 32-bit aligned */ 311 if ((dma_addr & 0x3) == 0) { 312 dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; 313 dma_params.elem_count = 8; /* Elements in frame */ 314 } else { 315 dma_params.data_type = OMAP_DMA_DATA_TYPE_S16; 316 dma_params.elem_count = 16; /* Elements in frame */ 317 fifo = hw_ep->fifo_async; 318 } 319 320 dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */ 321 322 DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", 323 chdat->epnum, chdat->tx ? "tx" : "rx", 324 ch, dma_addr, chdat->transfer_len, len, 325 chdat->transfer_packet_sz, packet_sz); 326 327 /* 328 * Prepare omap DMA for transfer 329 */ 330 if (chdat->tx) { 331 dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; 332 dma_params.src_start = (unsigned long)dma_addr; 333 dma_params.src_ei = 0; 334 dma_params.src_fi = 0; 335 336 dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX; 337 dma_params.dst_start = (unsigned long)fifo; 338 dma_params.dst_ei = 1; 339 dma_params.dst_fi = -31; /* Loop 32 byte window */ 340 341 dma_params.trigger = sync_dev; 342 dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; 343 dma_params.src_or_dst_synch = 0; /* Dest sync */ 344 345 src_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 read */ 346 dst_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 write */ 347 } else { 348 dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX; 349 dma_params.src_start = (unsigned long)fifo; 350 dma_params.src_ei = 1; 351 dma_params.src_fi = -31; /* Loop 32 byte window */ 352 353 dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; 354 dma_params.dst_start = (unsigned long)dma_addr; 355 dma_params.dst_ei = 0; 356 dma_params.dst_fi = 0; 357 358 dma_params.trigger = sync_dev; 359 dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; 360 dma_params.src_or_dst_synch = 1; /* Source sync */ 361 362 src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */ 363 dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */ 364 } 365 366 DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n", 367 chdat->epnum, chdat->tx ? "tx" : "rx", 368 (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16, 369 ((dma_addr & 0x3) == 0) ? "sync" : "async", 370 dma_params.src_start, dma_params.dst_start); 371 372 omap_set_dma_params(ch, &dma_params); 373 omap_set_dma_src_burst_mode(ch, src_burst); 374 omap_set_dma_dest_burst_mode(ch, dst_burst); 375 omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED); 376 377 /* 378 * Prepare MUSB for DMA transfer 379 */ 380 if (chdat->tx) { 381 musb_ep_select(mbase, chdat->epnum); 382 csr = musb_readw(hw_ep->regs, MUSB_TXCSR); 383 csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB 384 | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); 385 csr &= ~MUSB_TXCSR_P_UNDERRUN; 386 musb_writew(hw_ep->regs, MUSB_TXCSR, csr); 387 } else { 388 musb_ep_select(mbase, chdat->epnum); 389 csr = musb_readw(hw_ep->regs, MUSB_RXCSR); 390 csr |= MUSB_RXCSR_DMAENAB; 391 csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE); 392 musb_writew(hw_ep->regs, MUSB_RXCSR, 393 csr | MUSB_RXCSR_P_WZC_BITS); 394 } 395 396 /* 397 * Start DMA transfer 398 */ 399 omap_start_dma(ch); 400 401 if (chdat->tx) { 402 /* Send transfer_packet_sz packets at a time */ 403 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, 404 chdat->transfer_packet_sz); 405 406 musb_writel(ep_conf, TUSB_EP_TX_OFFSET, 407 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); 408 } else { 409 /* Receive transfer_packet_sz packets at a time */ 410 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, 411 chdat->transfer_packet_sz << 16); 412 413 musb_writel(ep_conf, TUSB_EP_RX_OFFSET, 414 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); 415 } 416 417 return true; 418 } 419 420 static int tusb_omap_dma_abort(struct dma_channel *channel) 421 { 422 struct tusb_omap_dma_ch *chdat = to_chdat(channel); 423 struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; 424 425 if (!tusb_dma->multichannel) { 426 if (tusb_dma->ch >= 0) { 427 omap_stop_dma(tusb_dma->ch); 428 omap_free_dma(tusb_dma->ch); 429 tusb_dma->ch = -1; 430 } 431 432 tusb_dma->dmareq = -1; 433 tusb_dma->sync_dev = -1; 434 } 435 436 channel->status = MUSB_DMA_STATUS_FREE; 437 438 return 0; 439 } 440 441 static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat) 442 { 443 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); 444 int i, dmareq_nr = -1; 445 446 const int sync_dev[6] = { 447 OMAP24XX_DMA_EXT_DMAREQ0, 448 OMAP24XX_DMA_EXT_DMAREQ1, 449 OMAP242X_DMA_EXT_DMAREQ2, 450 OMAP242X_DMA_EXT_DMAREQ3, 451 OMAP242X_DMA_EXT_DMAREQ4, 452 OMAP242X_DMA_EXT_DMAREQ5, 453 }; 454 455 for (i = 0; i < MAX_DMAREQ; i++) { 456 int cur = (reg & (0xf << (i * 5))) >> (i * 5); 457 if (cur == 0) { 458 dmareq_nr = i; 459 break; 460 } 461 } 462 463 if (dmareq_nr == -1) 464 return -EAGAIN; 465 466 reg |= (chdat->epnum << (dmareq_nr * 5)); 467 if (chdat->tx) 468 reg |= ((1 << 4) << (dmareq_nr * 5)); 469 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); 470 471 chdat->dmareq = dmareq_nr; 472 chdat->sync_dev = sync_dev[chdat->dmareq]; 473 474 return 0; 475 } 476 477 static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat) 478 { 479 u32 reg; 480 481 if (!chdat || chdat->dmareq < 0) 482 return; 483 484 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); 485 reg &= ~(0x1f << (chdat->dmareq * 5)); 486 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); 487 488 chdat->dmareq = -1; 489 chdat->sync_dev = -1; 490 } 491 492 static struct dma_channel *dma_channel_pool[MAX_DMAREQ]; 493 494 static struct dma_channel * 495 tusb_omap_dma_allocate(struct dma_controller *c, 496 struct musb_hw_ep *hw_ep, 497 u8 tx) 498 { 499 int ret, i; 500 const char *dev_name; 501 struct tusb_omap_dma *tusb_dma; 502 struct musb *musb; 503 void __iomem *tbase; 504 struct dma_channel *channel = NULL; 505 struct tusb_omap_dma_ch *chdat = NULL; 506 u32 reg; 507 508 tusb_dma = container_of(c, struct tusb_omap_dma, controller); 509 musb = tusb_dma->musb; 510 tbase = musb->ctrl_base; 511 512 reg = musb_readl(tbase, TUSB_DMA_INT_MASK); 513 if (tx) 514 reg &= ~(1 << hw_ep->epnum); 515 else 516 reg &= ~(1 << (hw_ep->epnum + 15)); 517 musb_writel(tbase, TUSB_DMA_INT_MASK, reg); 518 519 /* REVISIT: Why does dmareq5 not work? */ 520 if (hw_ep->epnum == 0) { 521 DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx"); 522 return NULL; 523 } 524 525 for (i = 0; i < MAX_DMAREQ; i++) { 526 struct dma_channel *ch = dma_channel_pool[i]; 527 if (ch->status == MUSB_DMA_STATUS_UNKNOWN) { 528 ch->status = MUSB_DMA_STATUS_FREE; 529 channel = ch; 530 chdat = ch->private_data; 531 break; 532 } 533 } 534 535 if (!channel) 536 return NULL; 537 538 if (tx) { 539 chdat->tx = 1; 540 dev_name = "TUSB transmit"; 541 } else { 542 chdat->tx = 0; 543 dev_name = "TUSB receive"; 544 } 545 546 chdat->musb = tusb_dma->musb; 547 chdat->tbase = tusb_dma->tbase; 548 chdat->hw_ep = hw_ep; 549 chdat->epnum = hw_ep->epnum; 550 chdat->dmareq = -1; 551 chdat->completed_len = 0; 552 chdat->tusb_dma = tusb_dma; 553 554 channel->max_len = 0x7fffffff; 555 channel->desired_mode = 0; 556 channel->actual_len = 0; 557 558 if (tusb_dma->multichannel) { 559 ret = tusb_omap_dma_allocate_dmareq(chdat); 560 if (ret != 0) 561 goto free_dmareq; 562 563 ret = omap_request_dma(chdat->sync_dev, dev_name, 564 tusb_omap_dma_cb, channel, &chdat->ch); 565 if (ret != 0) 566 goto free_dmareq; 567 } else if (tusb_dma->ch == -1) { 568 tusb_dma->dmareq = 0; 569 tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0; 570 571 /* Callback data gets set later in the shared dmareq case */ 572 ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared", 573 tusb_omap_dma_cb, NULL, &tusb_dma->ch); 574 if (ret != 0) 575 goto free_dmareq; 576 577 chdat->dmareq = -1; 578 chdat->ch = -1; 579 } 580 581 DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n", 582 chdat->epnum, 583 chdat->tx ? "tx" : "rx", 584 chdat->ch >= 0 ? "dedicated" : "shared", 585 chdat->ch >= 0 ? chdat->ch : tusb_dma->ch, 586 chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq, 587 chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev); 588 589 return channel; 590 591 free_dmareq: 592 tusb_omap_dma_free_dmareq(chdat); 593 594 DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum); 595 channel->status = MUSB_DMA_STATUS_UNKNOWN; 596 597 return NULL; 598 } 599 600 static void tusb_omap_dma_release(struct dma_channel *channel) 601 { 602 struct tusb_omap_dma_ch *chdat = to_chdat(channel); 603 struct musb *musb = chdat->musb; 604 void __iomem *tbase = musb->ctrl_base; 605 u32 reg; 606 607 DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch); 608 609 reg = musb_readl(tbase, TUSB_DMA_INT_MASK); 610 if (chdat->tx) 611 reg |= (1 << chdat->epnum); 612 else 613 reg |= (1 << (chdat->epnum + 15)); 614 musb_writel(tbase, TUSB_DMA_INT_MASK, reg); 615 616 reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR); 617 if (chdat->tx) 618 reg |= (1 << chdat->epnum); 619 else 620 reg |= (1 << (chdat->epnum + 15)); 621 musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg); 622 623 channel->status = MUSB_DMA_STATUS_UNKNOWN; 624 625 if (chdat->ch >= 0) { 626 omap_stop_dma(chdat->ch); 627 omap_free_dma(chdat->ch); 628 chdat->ch = -1; 629 } 630 631 if (chdat->dmareq >= 0) 632 tusb_omap_dma_free_dmareq(chdat); 633 634 channel = NULL; 635 } 636 637 void dma_controller_destroy(struct dma_controller *c) 638 { 639 struct tusb_omap_dma *tusb_dma; 640 int i; 641 642 tusb_dma = container_of(c, struct tusb_omap_dma, controller); 643 for (i = 0; i < MAX_DMAREQ; i++) { 644 struct dma_channel *ch = dma_channel_pool[i]; 645 if (ch) { 646 kfree(ch->private_data); 647 kfree(ch); 648 } 649 } 650 651 if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0) 652 omap_free_dma(tusb_dma->ch); 653 654 kfree(tusb_dma); 655 } 656 657 struct dma_controller *__init 658 dma_controller_create(struct musb *musb, void __iomem *base) 659 { 660 void __iomem *tbase = musb->ctrl_base; 661 struct tusb_omap_dma *tusb_dma; 662 int i; 663 664 /* REVISIT: Get dmareq lines used from board-*.c */ 665 666 musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff); 667 musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0); 668 669 musb_writel(tbase, TUSB_DMA_REQ_CONF, 670 TUSB_DMA_REQ_CONF_BURST_SIZE(2) 671 | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) 672 | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2)); 673 674 tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL); 675 if (!tusb_dma) 676 goto cleanup; 677 678 tusb_dma->musb = musb; 679 tusb_dma->tbase = musb->ctrl_base; 680 681 tusb_dma->ch = -1; 682 tusb_dma->dmareq = -1; 683 tusb_dma->sync_dev = -1; 684 685 tusb_dma->controller.start = tusb_omap_dma_start; 686 tusb_dma->controller.stop = tusb_omap_dma_stop; 687 tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate; 688 tusb_dma->controller.channel_release = tusb_omap_dma_release; 689 tusb_dma->controller.channel_program = tusb_omap_dma_program; 690 tusb_dma->controller.channel_abort = tusb_omap_dma_abort; 691 692 if (tusb_get_revision(musb) >= TUSB_REV_30) 693 tusb_dma->multichannel = 1; 694 695 for (i = 0; i < MAX_DMAREQ; i++) { 696 struct dma_channel *ch; 697 struct tusb_omap_dma_ch *chdat; 698 699 ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL); 700 if (!ch) 701 goto cleanup; 702 703 dma_channel_pool[i] = ch; 704 705 chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL); 706 if (!chdat) 707 goto cleanup; 708 709 ch->status = MUSB_DMA_STATUS_UNKNOWN; 710 ch->private_data = chdat; 711 } 712 713 return &tusb_dma->controller; 714 715 cleanup: 716 dma_controller_destroy(&tusb_dma->controller); 717 718 return NULL; 719 } 720