#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "musb_core.h"

#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREG_NONE		0
#define EP_MODE_AUTOREG_ALL_NEOP	1
#define EP_MODE_AUTOREG_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

struct cppi41_dma_channel {
	struct dma_channel channel;
	struct cppi41_dma_controller *controller;
	struct musb_hw_ep *hw_ep;
	struct dma_chan *dc;
	dma_cookie_t cookie;
	u8 port_num;
	u8 is_tx;
	u8 is_allocated;
	u8 usb_toggle;

	dma_addr_t buf_addr;
	u32 total_len;
	u32 prog_len;
	u32 transferred;
	u32 packet_sz;
	struct list_head tx_check;
};

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to internal synchronisation error the
	 * data toggle may reset from DATA1 to DATA0 during receiving data from
	 * more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		dev_dbg(cppi41_channel->controller->musb->controller,
				"Restoring DATA1 toggle.\n");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8 epnum = hw_ep->epnum;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	u16 csr;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static void cppi41_dma_callback(void *private_data);

static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u16 csr;
		u32 remain_bytes;
		void __iomem *epio = cppi41_channel->hw_ep->regs;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ?
			DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
			cppi41_channel->buf_addr,
			remain_bytes,
			direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx,
				ktime_set(0, 50 * NSEC_PER_USEC));
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
		hw_ep->epnum, cppi41_channel->transferred,
		cppi41_channel->total_len);

	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	empty = musb_is_tx_fifo_empty(hw_ep);
	if (empty) {
		cppi41_trans_done(cppi41_channel);
	} else {
		struct cppi41_dma_controller *controller;
		/*
		 * On AM335x it has been observed that the TX interrupt fires
		 * too early, meaning the TXFIFO is not yet empty but the DMA
		 * engine says that it is done with the transfer. We don't
		 * receive a FIFO empty interrupt so the only thing we can do is
		 * to poll for the bit. On HS it usually takes 2us, on FS around
		 * 110us - 150us depending on the transfer size.
		 * We spin on HS (no longer than 25us) and set up a timer on
		 * FS to check for the bit and complete the transfer.
		 */
		controller = cppi41_channel->controller;

		if (musb->g.speed == USB_SPEED_HIGH) {
			unsigned wait = 25;

			do {
				empty = musb_is_tx_fifo_empty(hw_ep);
				if (empty)
					break;
				wait--;
				if (!wait)
					break;
				udelay(1);
			} while (1);

			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
		}
		list_add_tail(&cppi41_channel->tx_check,
				&controller->early_tx_list);
		if (!hrtimer_is_queued(&controller->early_tx)) {
			unsigned long usecs = cppi41_channel->total_len / 10;

			hrtimer_start_range_ns(&controller->early_tx,
				ktime_set(0, usecs * NSEC_PER_USEC),
				40 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
		}
	}
out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

/* Each endpoint owns a two-bit mode field in the TX/RX mode registers. */
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
				new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
				new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	dev_dbg(musb->controller,
		"configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
		packet_sz, mode, (unsigned long long) dma_addr,
		len, cppi41_channel->is_tx);

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;

	/*
	 * Due to AM335x Advisory 1.0.13 we are not allowed to transfer more
	 * than max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel =
			channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
			cppi41_channel->port_num, is_tx);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (!strncmp(str, "tx", 2))
			is_tx = 1;
		else if (!strncmp(str, "rx", 2))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			ret = -EINVAL;
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port >
				MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}

struct dma_controller *dma_controller_create(struct musb *musb,
					void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}