#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "cppi_dma.h"
#include "musb_core.h"
#include "musb_trace.h"

#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREQ_NONE		0
#define EP_MODE_AUTOREQ_ALL_NEOP	1
#define EP_MODE_AUTOREQ_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		musb_dbg(cppi41_channel->controller->musb,
				"Restoring DATA1 toggle.");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8 epnum = hw_ep->epnum;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	u16 csr;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static void cppi41_dma_callback(void *private_data);

static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	u16 csr;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;

		/*
		 * transmit ZLP using PIO mode for transfers whose size is a
		 * multiple of the EP packet size.
		 */
		if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
					cppi41_channel->packet_sz) == 0) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
		}

		trace_musb_cppi41_done(cppi41_channel);
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u32 remain_bytes;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		trace_musb_cppi41_cont(cppi41_channel);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list) &&
	    !hrtimer_is_queued(&controller->early_tx)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx,
				ktime_set(0, 20 * NSEC_PER_USEC));
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct cppi41_dma_controller *controller;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	int is_hs = 0;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	trace_musb_cppi41_gb(cppi41_channel);
	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
	    transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;
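
	/*
	 * The DMA engine may report completion before the TXFIFO has
	 * drained; for TX, verify the FIFO is empty before finishing
	 * the transfer (see the errata note below).
	 */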
	if (cppi41_channel->is_tx)
		empty = musb_is_tx_fifo_empty(hw_ep);

	if (!cppi41_channel->is_tx || empty) {
		cppi41_trans_done(cppi41_channel);
		goto out;
	}

	/*
	 * On AM335x it has been observed that the TX interrupt fires too
	 * early, that is, the TXFIFO is not yet empty while the DMA engine
	 * reports that it is done with the transfer. We don't receive a FIFO
	 * empty interrupt so the only thing we can do is to poll for the bit.
	 * On HS it usually takes 2us, on FS around 110us - 150us depending on
	 * the transfer size. We spin on HS (no longer than 25us) and set up a
	 * timer on FS to check for the bit and complete the transfer.
	 */
	controller = cppi41_channel->controller;

	if (is_host_active(musb)) {
		if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
			is_hs = 1;
	} else {
		if (musb->g.speed == USB_SPEED_HIGH)
			is_hs = 1;
	}
	if (is_hs) {
		unsigned wait = 25;

		do {
			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
			wait--;
			if (!wait)
				break;
			cpu_relax();
		} while (1);
	}
	list_add_tail(&cppi41_channel->tx_check,
			&controller->early_tx_list);
	if (!hrtimer_is_queued(&controller->early_tx)) {
		unsigned long usecs = cppi41_channel->total_len / 10;

		hrtimer_start_range_ns(&controller->early_tx,
				ktime_set(0, usecs * NSEC_PER_USEC),
				20 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
	}

out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
				new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
				new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

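	/* Record the transfer so the completion callback can reload/segment it. */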
	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;
	cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

	/*
	 * Due to AM335x Advisory 1.0.13, we are not allowed to transfer more
	 * than the max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	trace_musb_cppi41_config(cppi41_channel);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	trace_musb_cppi41_alloc(cppi41_channel);
	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	trace_musb_cppi41_free(cppi41_channel);
	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}
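
	/*
	 * In host mode, hb_mult is the per-(micro)frame transaction count of
	 * a high-bandwidth endpoint, taken from the queue head; bits 10:0 of
	 * packet_sz carry the base packet size, so the scaling below yields
	 * the full payload of one (micro)frame.
	 */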

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	trace_musb_cppi41_abort(cppi41_channel);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

		/* delay to drain the CPPI DMA pipeline for isoch */
		udelay(250);

		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* wait to drain the CPPI DMA pipeline */
		udelay(50);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		if (is_tx)
			musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	if (is_tx) {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->parent->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (strstarts(str, "tx"))
			is_tx = 1;
		else if (strstarts(str, "rx"))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			ret = -EINVAL;
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev->parent, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void cppi41_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);

struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->parent->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);
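
/*
 * For reference, a sketch of the devicetree wiring that
 * cppi41_dma_controller_start() above parses: "dma-names" entries of the
 * form "rxN"/"txN" bound to CPPI 4.1 channels. Node name and specifier
 * values below are illustrative, not copied from a particular .dtsi:
 *
 *	usb@1800 {
 *		...
 *		dmas = <&cppi41dma 0 0>, <&cppi41dma 0 1>;
 *		dma-names = "rx1", "tx1";
 *	};
 */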