#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "musb_core.h"

#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREG_NONE		0
#define EP_MODE_AUTOREG_ALL_NEOP	1
#define EP_MODE_AUTOREG_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

struct cppi41_dma_channel {
	struct dma_channel channel;
	struct cppi41_dma_controller *controller;
	struct musb_hw_ep *hw_ep;
	struct dma_chan *dc;
	dma_cookie_t cookie;
	u8 port_num;
	u8 is_tx;
	u8 is_allocated;
	u8 usb_toggle;

	dma_addr_t buf_addr;
	u32 total_len;
	u32 prog_len;
	u32 transferred;
	u32 packet_sz;
	struct list_head tx_check;
	struct work_struct dma_completion;
};

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
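	 *
	 * If the toggle reads as DATA0 now and also read as DATA0 when the
	 * transfer was started, assume the erratum hit and force the toggle
	 * back to DATA1 with the H_WR_DATATOGGLE write below.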
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		dev_dbg(cppi41_channel->controller->musb->controller,
				"Restoring DATA1 toggle.\n");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8 epnum = hw_ep->epnum;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	u16 csr;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static bool is_isoc(struct musb_hw_ep *hw_ep, bool in)
{
	if (in && hw_ep->in_qh) {
		if (hw_ep->in_qh->type == USB_ENDPOINT_XFER_ISOC)
			return true;
	} else if (hw_ep->out_qh) {
		if (hw_ep->out_qh->type == USB_ENDPOINT_XFER_ISOC)
			return true;
	}
	return false;
}

static void cppi41_dma_callback(void *private_data);

static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u16 csr;
		u32 remain_bytes;
		void __iomem *epio = cppi41_channel->hw_ep->regs;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;
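
		/*
		 * Program the next chunk one packet at a time; see the
		 * Advisory 1.0.13 note in cppi41_configure_channel() for
		 * why larger reloads are not used here.
		 */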
		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
						  : DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

static void cppi_trans_done_work(struct work_struct *work)
{
	unsigned long flags;
	struct cppi41_dma_channel *cppi41_channel =
		container_of(work, struct cppi41_dma_channel, dma_completion);
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	bool empty;

	if (!cppi41_channel->is_tx && is_isoc(hw_ep, 1)) {
		spin_lock_irqsave(&musb->lock, flags);
		cppi41_trans_done(cppi41_channel);
		spin_unlock_irqrestore(&musb->lock, flags);
	} else {
		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			spin_lock_irqsave(&musb->lock, flags);
			cppi41_trans_done(cppi41_channel);
			spin_unlock_irqrestore(&musb->lock, flags);
		} else {
			schedule_work(&cppi41_channel->dma_completion);
		}
	}
}

static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx,
				ktime_set(0, 150 * NSEC_PER_USEC));
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
		hw_ep->epnum, cppi41_channel->transferred,
		cppi41_channel->total_len);

	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;
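
	/*
	 * Completion of RX isochronous transfers is deferred to the
	 * workqueue; all other RX transfers complete right here.
	 */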
	if (!cppi41_channel->is_tx) {
		if (is_isoc(hw_ep, 1))
			schedule_work(&cppi41_channel->dma_completion);
		else
			cppi41_trans_done(cppi41_channel);
		goto out;
	}

	empty = musb_is_tx_fifo_empty(hw_ep);
	if (empty) {
		cppi41_trans_done(cppi41_channel);
	} else {
		struct cppi41_dma_controller *controller;
		/*
		 * On AM335x it has been observed that the TX interrupt fires
		 * too early, i.e. the TXFIFO is not yet empty although the
		 * DMA engine reports that it is done with the transfer. We
		 * don't receive a FIFO-empty interrupt, so the only thing we
		 * can do is poll for the bit. On HS it usually takes 2us, on
		 * FS around 110us - 150us depending on the transfer size.
		 * We spin on HS (no longer than 25us) and set up a timer on
		 * FS to check for the bit and complete the transfer.
		 */
		controller = cppi41_channel->controller;

		if (musb->g.speed == USB_SPEED_HIGH) {
			unsigned wait = 25;

			do {
				empty = musb_is_tx_fifo_empty(hw_ep);
				if (empty)
					break;
				wait--;
				if (!wait)
					break;
				udelay(1);
			} while (1);

			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
		}
		if (is_isoc(hw_ep, 0)) {
			schedule_work(&cppi41_channel->dma_completion);
			goto out;
		}
		list_add_tail(&cppi41_channel->tx_check,
				&controller->early_tx_list);
		if (!hrtimer_active(&controller->early_tx)) {
			hrtimer_start_range_ns(&controller->early_tx,
				ktime_set(0, 140 * NSEC_PER_USEC),
				40 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
		}
	}
out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
				new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
				new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

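/*
 * Program one transfer on the dmaengine channel. TX transfers larger than
 * a single packet use generic RNDIS mode so the hardware splits them at
 * max-packet boundaries; everything else falls back to transparent mode,
 * one packet at a time.
 */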
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	dev_dbg(musb->controller,
		"configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
		packet_sz, mode, (unsigned long long) dma_addr,
		len, cppi41_channel->is_tx);

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;

	/*
	 * Due to AM335x's Advisory 1.0.13 we are not allowed to transfer more
	 * than the max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

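/*
 * In host mode, packet_sz is scaled by the high-bandwidth multiplier so
 * that high-bandwidth endpoints program the full per-microframe payload
 * in one go (hb_mult comes from the queue head; the low 11 bits of
 * packet_sz carry the base max packet size).
 */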
static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
			cppi41_channel->port_num, is_tx);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

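	/*
	 * Each "dma-names" entry is expected to look like "tx1".."tx15" or
	 * "rx1".."rx15": the prefix selects the direction, the remainder is
	 * the 1-based port number.
	 */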
	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (!strncmp(str, "tx", 2))
			is_tx = 1;
		else if (!strncmp(str, "rx", 2))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			ret = -EINVAL;
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);
		INIT_WORK(&cppi41_channel->dma_completion,
			  cppi_trans_done_work);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}

struct dma_controller *dma_controller_create(struct musb *musb,
					void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}