Lines Matching +full:edma +full:- +full:tx
1 // SPDX-License-Identifier: GPL-2.0-or-later
8 * arch/arm/mach-ep93xx/dma-m2p.c, which has the following copyrights:
14 * This driver is based on dw_dmac and amba-pl08x drivers.
26 #include <linux/platform_data/dma-ep93xx.h>
113 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
133 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
135 * @edma: pointer to the engine device
166 const struct ep93xx_dma_engine *edma; member
187 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
221 return &edmac->chan.dev->device; in chan2dev()
230 * ep93xx_dma_set_active - set new active descriptor chain
238 * Called with @edmac->lock held and interrupts disabled.
243 BUG_ON(!list_empty(&edmac->active)); in ep93xx_dma_set_active()
245 list_add_tail(&desc->node, &edmac->active); in ep93xx_dma_set_active()
247 /* Flatten the @desc->tx_list chain into @edmac->active list */ in ep93xx_dma_set_active()
248 while (!list_empty(&desc->tx_list)) { in ep93xx_dma_set_active()
249 struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list, in ep93xx_dma_set_active()
258 d->txd.callback = desc->txd.callback; in ep93xx_dma_set_active()
259 d->txd.callback_param = desc->txd.callback_param; in ep93xx_dma_set_active()
261 list_move_tail(&d->node, &edmac->active); in ep93xx_dma_set_active()
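
The loop above flattens a multi-part transaction into one run queue: every sub-descriptor on @desc->tx_list is moved, in order, onto @edmac->active, after the first descriptor's callback has been copied to it. A minimal sketch of the same flattening pattern, assuming only <linux/list.h>:

#include <linux/list.h>

/*
 * Hypothetical illustration: drain @src onto the tail of @dst one entry
 * at a time, so @src ends up empty and @dst holds the flattened chain in
 * the original order.
 */
static void flatten_chain(struct list_head *dst, struct list_head *src)
{
	while (!list_empty(src))
		list_move_tail(src->next, dst);
}
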
265 /* Called with @edmac->lock held and interrupts disabled */
269 return list_first_entry_or_null(&edmac->active, in ep93xx_dma_get_active()
274 * ep93xx_dma_advance_active - advances to the next active descriptor
277 * Function advances the active descriptor to the next one in @edmac->active and
283 * Called with @edmac->lock held and interrupts disabled.
289 list_rotate_left(&edmac->active); in ep93xx_dma_advance_active()
291 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) in ep93xx_dma_advance_active()
302 return !desc->txd.cookie; in ep93xx_dma_advance_active()
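
ep93xx_dma_advance_active() relies on list_rotate_left(), which moves the first entry of a list to its tail: an active chain [d0, d1, d2] becomes [d1, d2, d0], so the list head always names the descriptor the hardware is currently processing. A hedged sketch of that rotation, assuming <linux/list.h>:

#include <linux/list.h>

/* Advance a circular run queue by one descriptor (illustration only). */
static void advance_one(struct list_head *active)
{
	if (!list_empty(active))
		list_rotate_left(active);	/* [d0, d1, d2] -> [d1, d2, d0] */
}

For cyclic transfers the rotation never terminates by design. For normal chains, only the first descriptor carries a DMA cookie, so the `return !desc->txd.cookie` above reports completion exactly when the rotation has wrapped back around to the first descriptor.
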
311 writel(control, edmac->regs + M2P_CONTROL); in m2p_set_control()
316 readl(edmac->regs + M2P_CONTROL); in m2p_set_control()
321 struct ep93xx_dma_data *data = edmac->chan.private; in m2p_hw_setup()
324 writel(data->port & 0xf, edmac->regs + M2P_PPALLOC); in m2p_hw_setup()
330 edmac->buffer = 0; in m2p_hw_setup()
337 return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; in m2p_channel_state()
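
The two STATUS bits extracted above encode the channel Control FSM state. The driver names the four states earlier in the file (not among these matching lines): M2P_STATE_IDLE (0), M2P_STATE_STALL (1), M2P_STATE_ON (2) and M2P_STATE_NEXT (3); in the full source, m2p_hw_synchronize() below polls this state until the channel drops below M2P_STATE_ON.
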
345 spin_lock_irqsave(&edmac->lock, flags); in m2p_hw_synchronize()
346 control = readl(edmac->regs + M2P_CONTROL); in m2p_hw_synchronize()
349 spin_unlock_irqrestore(&edmac->lock, flags); in m2p_hw_synchronize()
374 if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV) in m2p_fill_desc()
375 bus_addr = desc->src_addr; in m2p_fill_desc()
377 bus_addr = desc->dst_addr; in m2p_fill_desc()
379 if (edmac->buffer == 0) { in m2p_fill_desc()
380 writel(desc->size, edmac->regs + M2P_MAXCNT0); in m2p_fill_desc()
381 writel(bus_addr, edmac->regs + M2P_BASE0); in m2p_fill_desc()
383 writel(desc->size, edmac->regs + M2P_MAXCNT1); in m2p_fill_desc()
384 writel(bus_addr, edmac->regs + M2P_BASE1); in m2p_fill_desc()
387 edmac->buffer ^= 1; in m2p_fill_desc()
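
m2p_fill_desc() programs whichever of the two hardware descriptor banks (MAXCNT0/BASE0 vs. MAXCNT1/BASE1) is free and flips edmac->buffer so the next call targets the other bank; m2m_fill_desc() below uses the same idiom with the SAR/DAR/BCR banks. A hedged, self-contained sketch of the ping-pong pattern (the names here are illustrative, not driver API):

#include <linux/types.h>

/* Two register banks, one index bit: classic double buffering. */
struct pingpong {
	u32 maxcnt[2];
	u32 base[2];
	unsigned int idx;
};

static void fill_bank(struct pingpong *pp, u32 addr, u32 size)
{
	pp->maxcnt[pp->idx] = size;	/* MAXCNT0 or MAXCNT1 analogue */
	pp->base[pp->idx] = addr;	/* BASE0 or BASE1 analogue */
	pp->idx ^= 1;			/* next fill lands in the other bank */
}
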
392 u32 control = readl(edmac->regs + M2P_CONTROL); in m2p_hw_submit()
407 u32 irq_status = readl(edmac->regs + M2P_INTERRUPT); in m2p_hw_interrupt()
414 writel(1, edmac->regs + M2P_INTERRUPT); in m2p_hw_interrupt()
430 desc->txd.cookie, desc->src_addr, desc->dst_addr, in m2p_hw_interrupt()
431 desc->size); in m2p_hw_interrupt()
448 control = readl(edmac->regs + M2P_CONTROL); in m2p_hw_interrupt()
461 const struct ep93xx_dma_data *data = edmac->chan.private; in m2m_hw_setup()
466 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_setup()
470 switch (data->port) { in m2m_hw_setup()
473 * This was found via experimenting - anything less than 5 in m2m_hw_setup()
480 if (data->direction == DMA_MEM_TO_DEV) { in m2m_hw_setup()
496 if (data->direction == DMA_MEM_TO_DEV) { in m2m_hw_setup()
513 return -EINVAL; in m2m_hw_setup()
516 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_setup()
523 writel(0, edmac->regs + M2M_CONTROL); in m2m_hw_shutdown()
536 if (edmac->buffer == 0) { in m2m_fill_desc()
537 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); in m2m_fill_desc()
538 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0); in m2m_fill_desc()
539 writel(desc->size, edmac->regs + M2M_BCR0); in m2m_fill_desc()
541 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1); in m2m_fill_desc()
542 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1); in m2m_fill_desc()
543 writel(desc->size, edmac->regs + M2M_BCR1); in m2m_fill_desc()
546 edmac->buffer ^= 1; in m2m_fill_desc()
551 struct ep93xx_dma_data *data = edmac->chan.private; in m2m_hw_submit()
552 u32 control = readl(edmac->regs + M2M_CONTROL); in m2m_hw_submit()
560 control |= edmac->runtime_ctrl; in m2m_hw_submit()
575 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_submit()
583 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_submit()
590 * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
592 * is still running (channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
599 u32 status = readl(edmac->regs + M2M_STATUS); in m2m_hw_interrupt()
608 if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK)) in m2m_hw_interrupt()
613 writel(0, edmac->regs + M2M_INTERRUPT); in m2m_hw_interrupt()
621 last_done = !desc || desc->txd.cookie; in m2m_hw_interrupt()
639 if (done && !edmac->chan.private) { in m2m_hw_interrupt()
641 control = readl(edmac->regs + M2M_CONTROL); in m2m_hw_interrupt()
643 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_interrupt()
659 control = readl(edmac->regs + M2M_CONTROL); in m2m_hw_interrupt()
662 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_interrupt()
683 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_desc_get()
684 list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) { in ep93xx_dma_desc_get()
685 if (async_tx_test_ack(&desc->txd)) { in ep93xx_dma_desc_get()
686 list_del_init(&desc->node); in ep93xx_dma_desc_get()
688 /* Re-initialize the descriptor */ in ep93xx_dma_desc_get()
689 desc->src_addr = 0; in ep93xx_dma_desc_get()
690 desc->dst_addr = 0; in ep93xx_dma_desc_get()
691 desc->size = 0; in ep93xx_dma_desc_get()
692 desc->complete = false; in ep93xx_dma_desc_get()
693 desc->txd.cookie = 0; in ep93xx_dma_desc_get()
694 desc->txd.callback = NULL; in ep93xx_dma_desc_get()
695 desc->txd.callback_param = NULL; in ep93xx_dma_desc_get()
701 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_desc_get()
711 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_desc_put()
712 list_splice_init(&desc->tx_list, &edmac->free_list); in ep93xx_dma_desc_put()
713 list_add(&desc->node, &edmac->free_list); in ep93xx_dma_desc_put()
714 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_desc_put()
719 * ep93xx_dma_advance_work - start processing the next pending transaction
723 * function takes the next queued transaction from the @edmac->queue and
731 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_advance_work()
732 if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) { in ep93xx_dma_advance_work()
733 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_advance_work()
738 new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node); in ep93xx_dma_advance_work()
739 list_del_init(&new->node); in ep93xx_dma_advance_work()
744 edmac->edma->hw_submit(edmac); in ep93xx_dma_advance_work()
745 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_advance_work()
756 spin_lock_irq(&edmac->lock); in ep93xx_dma_tasklet()
764 if (desc->complete) { in ep93xx_dma_tasklet()
766 if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) in ep93xx_dma_tasklet()
767 dma_cookie_complete(&desc->txd); in ep93xx_dma_tasklet()
768 list_splice_init(&edmac->active, &list); in ep93xx_dma_tasklet()
770 dmaengine_desc_get_callback(&desc->txd, &cb); in ep93xx_dma_tasklet()
772 spin_unlock_irq(&edmac->lock); in ep93xx_dma_tasklet()
779 dma_descriptor_unmap(&desc->txd); in ep93xx_dma_tasklet()
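
The callback captured above is invoked from tasklet (softirq) context once the channel lock has been dropped, so a client's completion callback must not sleep. A hedged sketch of a typical client callback (the names are hypothetical):

#include <linux/completion.h>

/* Signal a waiting thread that the transfer retired; safe in softirq. */
static void xfer_done(void *param)
{
	struct completion *done = param;

	complete(done);
}

/* At prep time: txd->callback = xfer_done; txd->callback_param = &done; */
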
792 spin_lock(&edmac->lock); in ep93xx_dma_interrupt()
798 spin_unlock(&edmac->lock); in ep93xx_dma_interrupt()
802 switch (edmac->edma->hw_interrupt(edmac)) { in ep93xx_dma_interrupt()
804 desc->complete = true; in ep93xx_dma_interrupt()
805 tasklet_schedule(&edmac->tasklet); in ep93xx_dma_interrupt()
809 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) in ep93xx_dma_interrupt()
810 tasklet_schedule(&edmac->tasklet); in ep93xx_dma_interrupt()
819 spin_unlock(&edmac->lock); in ep93xx_dma_interrupt()
824 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
825 * @tx: descriptor to be executed
831 static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) in ep93xx_dma_tx_submit() argument
833 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan); in ep93xx_dma_tx_submit()
838 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_tx_submit()
839 cookie = dma_cookie_assign(tx); in ep93xx_dma_tx_submit()
841 desc = container_of(tx, struct ep93xx_dma_desc, txd); in ep93xx_dma_tx_submit()
848 if (list_empty(&edmac->active)) { in ep93xx_dma_tx_submit()
850 edmac->edma->hw_submit(edmac); in ep93xx_dma_tx_submit()
852 list_add_tail(&desc->node, &edmac->queue); in ep93xx_dma_tx_submit()
855 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_tx_submit()
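
From the client side this path is reached through the standard dmaengine wrappers; a hedged usage sketch, where @txd is a descriptor returned by one of the prep helpers shown below:

#include <linux/dmaengine.h>

static int kick(struct dma_chan *chan, struct dma_async_tx_descriptor *txd)
{
	dma_cookie_t cookie;

	cookie = dmaengine_submit(txd);	/* lands in ep93xx_dma_tx_submit() */
	if (dma_submit_error(cookie))
		return -EIO;
	dma_async_issue_pending(chan);	/* drives edmac->queue once hw idles */
	return 0;
}
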
860 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
870 struct ep93xx_dma_data *data = chan->private; in ep93xx_dma_alloc_chan_resources()
875 if (!edmac->edma->m2m) { in ep93xx_dma_alloc_chan_resources()
877 return -EINVAL; in ep93xx_dma_alloc_chan_resources()
878 if (data->port < EP93XX_DMA_I2S1 || in ep93xx_dma_alloc_chan_resources()
879 data->port > EP93XX_DMA_IRDA) in ep93xx_dma_alloc_chan_resources()
880 return -EINVAL; in ep93xx_dma_alloc_chan_resources()
881 if (data->direction != ep93xx_dma_chan_direction(chan)) in ep93xx_dma_alloc_chan_resources()
882 return -EINVAL; in ep93xx_dma_alloc_chan_resources()
885 switch (data->port) { in ep93xx_dma_alloc_chan_resources()
888 if (!is_slave_direction(data->direction)) in ep93xx_dma_alloc_chan_resources()
889 return -EINVAL; in ep93xx_dma_alloc_chan_resources()
892 return -EINVAL; in ep93xx_dma_alloc_chan_resources()
897 if (data && data->name) in ep93xx_dma_alloc_chan_resources()
898 name = data->name; in ep93xx_dma_alloc_chan_resources()
900 ret = clk_prepare_enable(edmac->clk); in ep93xx_dma_alloc_chan_resources()
904 ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac); in ep93xx_dma_alloc_chan_resources()
908 spin_lock_irq(&edmac->lock); in ep93xx_dma_alloc_chan_resources()
909 dma_cookie_init(&edmac->chan); in ep93xx_dma_alloc_chan_resources()
910 ret = edmac->edma->hw_setup(edmac); in ep93xx_dma_alloc_chan_resources()
911 spin_unlock_irq(&edmac->lock); in ep93xx_dma_alloc_chan_resources()
925 INIT_LIST_HEAD(&desc->tx_list); in ep93xx_dma_alloc_chan_resources()
927 dma_async_tx_descriptor_init(&desc->txd, chan); in ep93xx_dma_alloc_chan_resources()
928 desc->txd.flags = DMA_CTRL_ACK; in ep93xx_dma_alloc_chan_resources()
929 desc->txd.tx_submit = ep93xx_dma_tx_submit; in ep93xx_dma_alloc_chan_resources()
937 free_irq(edmac->irq, edmac); in ep93xx_dma_alloc_chan_resources()
939 clk_disable_unprepare(edmac->clk); in ep93xx_dma_alloc_chan_resources()
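
The validation above consumes a struct ep93xx_dma_data that a client attaches via chan->private before the channel is opened. A hedged sketch of how a client typically obtains such a channel (the filter function and names are illustrative, not part of this driver; the data must outlive the channel, hence static here):

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-ep93xx.h>

static struct ep93xx_dma_data dma_data = {
	.port		= EP93XX_DMA_AAC1,
	.direction	= DMA_MEM_TO_DEV,
	.name		= "aac1-pcm-out",	/* hypothetical */
};

static bool ep93xx_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_data *data = filter_param;

	if (data->direction != ep93xx_dma_chan_direction(chan))
		return false;
	chan->private = data;	/* read back in ep93xx_dma_alloc_chan_resources() */
	return true;
}

static struct dma_chan *get_tx_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, ep93xx_dma_filter, &dma_data);
}
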
945 * ep93xx_dma_free_chan_resources - release resources for the channel
958 BUG_ON(!list_empty(&edmac->active)); in ep93xx_dma_free_chan_resources()
959 BUG_ON(!list_empty(&edmac->queue)); in ep93xx_dma_free_chan_resources()
961 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_free_chan_resources()
962 edmac->edma->hw_shutdown(edmac); in ep93xx_dma_free_chan_resources()
963 edmac->runtime_addr = 0; in ep93xx_dma_free_chan_resources()
964 edmac->runtime_ctrl = 0; in ep93xx_dma_free_chan_resources()
965 edmac->buffer = 0; in ep93xx_dma_free_chan_resources()
966 list_splice_init(&edmac->free_list, &list); in ep93xx_dma_free_chan_resources()
967 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_free_chan_resources()
972 clk_disable_unprepare(edmac->clk); in ep93xx_dma_free_chan_resources()
973 free_irq(edmac->irq, edmac); in ep93xx_dma_free_chan_resources()
977 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
1002 bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES); in ep93xx_dma_prep_dma_memcpy()
1004 desc->src_addr = src + offset; in ep93xx_dma_prep_dma_memcpy()
1005 desc->dst_addr = dest + offset; in ep93xx_dma_prep_dma_memcpy()
1006 desc->size = bytes; in ep93xx_dma_prep_dma_memcpy()
1011 list_add_tail(&desc->node, &first->tx_list); in ep93xx_dma_prep_dma_memcpy()
1014 first->txd.cookie = -EBUSY; in ep93xx_dma_prep_dma_memcpy()
1015 first->txd.flags = flags; in ep93xx_dma_prep_dma_memcpy()
1017 return &first->txd; in ep93xx_dma_prep_dma_memcpy()
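
The prep routine above splits a long copy into DMA_MAX_CHAN_BYTES chunks and chains them on first->tx_list. A hedged client-side sketch, assuming @chan is an M2M channel and @dst/@src are already DMA-mapped addresses:

#include <linux/dmaengine.h>

static int start_copy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		      size_t len)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
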
1024 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
1044 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { in ep93xx_dma_prep_slave_sg()
1050 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { in ep93xx_dma_prep_slave_sg()
1056 ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config); in ep93xx_dma_prep_slave_sg()
1075 desc->src_addr = sg_dma_address(sg); in ep93xx_dma_prep_slave_sg()
1076 desc->dst_addr = edmac->runtime_addr; in ep93xx_dma_prep_slave_sg()
1078 desc->src_addr = edmac->runtime_addr; in ep93xx_dma_prep_slave_sg()
1079 desc->dst_addr = sg_dma_address(sg); in ep93xx_dma_prep_slave_sg()
1081 desc->size = len; in ep93xx_dma_prep_slave_sg()
1086 list_add_tail(&desc->node, &first->tx_list); in ep93xx_dma_prep_slave_sg()
1089 first->txd.cookie = -EBUSY; in ep93xx_dma_prep_slave_sg()
1090 first->txd.flags = flags; in ep93xx_dma_prep_slave_sg()
1092 return &first->txd; in ep93xx_dma_prep_slave_sg()
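
A hedged client-side sketch for the slave path, assuming @sgl was mapped with dma_map_sg() and the channel was configured first (see ep93xx_dma_slave_config() below):

#include <linux/dmaengine.h>

static int start_tx_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int nents)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
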
1100 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
1106 * @flags: tx descriptor status flags
1125 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { in ep93xx_dma_prep_dma_cyclic()
1131 if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { in ep93xx_dma_prep_dma_cyclic()
1143 ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config); in ep93xx_dma_prep_dma_cyclic()
1155 desc->src_addr = dma_addr + offset; in ep93xx_dma_prep_dma_cyclic()
1156 desc->dst_addr = edmac->runtime_addr; in ep93xx_dma_prep_dma_cyclic()
1158 desc->src_addr = edmac->runtime_addr; in ep93xx_dma_prep_dma_cyclic()
1159 desc->dst_addr = dma_addr + offset; in ep93xx_dma_prep_dma_cyclic()
1162 desc->size = period_len; in ep93xx_dma_prep_dma_cyclic()
1167 list_add_tail(&desc->node, &first->tx_list); in ep93xx_dma_prep_dma_cyclic()
1170 first->txd.cookie = -EBUSY; in ep93xx_dma_prep_dma_cyclic()
1172 return &first->txd; in ep93xx_dma_prep_dma_cyclic()
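
Cyclic transfers are the audio-style mode guarded by the EP93XX_DMA_IS_CYCLIC bit above: the ring keeps running, firing the callback once per period, until the channel is terminated. A hedged usage sketch with hypothetical names:

#include <linux/dmaengine.h>

static void period_elapsed(void *stream)
{
	/* hypothetical: e.g. snd_pcm_period_elapsed(stream) in an ALSA driver */
}

static int start_ring(struct dma_chan *chan, dma_addr_t buf, size_t buf_len,
		      size_t period_len, void *stream)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;
	txd->callback = period_elapsed;	/* invoked once per completed period */
	txd->callback_param = stream;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
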
1180 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
1195 if (edmac->edma->hw_synchronize) in ep93xx_dma_synchronize()
1196 edmac->edma->hw_synchronize(edmac); in ep93xx_dma_synchronize()
1200 * ep93xx_dma_terminate_all - terminate all transactions
1204 * @edmac->free_list and callbacks are _not_ called.
1213 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_terminate_all()
1215 edmac->edma->hw_shutdown(edmac); in ep93xx_dma_terminate_all()
1216 clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags); in ep93xx_dma_terminate_all()
1217 list_splice_init(&edmac->active, &list); in ep93xx_dma_terminate_all()
1218 list_splice_init(&edmac->queue, &list); in ep93xx_dma_terminate_all()
1220 * We then re-enable the channel. This way we can continue submitting in ep93xx_dma_terminate_all()
1221 * the descriptors by just calling ->hw_submit() again. in ep93xx_dma_terminate_all()
1223 edmac->edma->hw_setup(edmac); in ep93xx_dma_terminate_all()
1224 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_terminate_all()
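
Clients normally reach the two functions above through the dmaengine wrappers; a hedged sketch:

#include <linux/dmaengine.h>

static void stop_channel(struct dma_chan *chan)
{
	/*
	 * Invokes device_terminate_all() (ep93xx_dma_terminate_all) and then
	 * device_synchronize() (ep93xx_dma_synchronize), so no completion
	 * callback is still running on return. Sleeps; not for atomic context.
	 */
	dmaengine_terminate_sync(chan);
}
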
1237 memcpy(&edmac->slave_config, config, sizeof(*config)); in ep93xx_dma_slave_config()
1251 if (!edmac->edma->m2m) in ep93xx_dma_slave_config_write()
1252 return -EINVAL; in ep93xx_dma_slave_config_write()
1256 width = config->src_addr_width; in ep93xx_dma_slave_config_write()
1257 addr = config->src_addr; in ep93xx_dma_slave_config_write()
1261 width = config->dst_addr_width; in ep93xx_dma_slave_config_write()
1262 addr = config->dst_addr; in ep93xx_dma_slave_config_write()
1266 return -EINVAL; in ep93xx_dma_slave_config_write()
1280 return -EINVAL; in ep93xx_dma_slave_config_write()
1283 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_slave_config_write()
1284 edmac->runtime_addr = addr; in ep93xx_dma_slave_config_write()
1285 edmac->runtime_ctrl = ctrl; in ep93xx_dma_slave_config_write()
1286 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_slave_config_write()
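
On the client side, the runtime address and control bits above come from a standard struct dma_slave_config; in the full source the width switch accepts only DMA_SLAVE_BUSWIDTH_1_BYTE and DMA_SLAVE_BUSWIDTH_2_BYTES on M2M hardware. A hedged sketch with a hypothetical FIFO address:

#include <linux/dmaengine.h>

static int setup_tx(struct dma_chan *chan, dma_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.dst_addr	= fifo_phys,	/* hypothetical device FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
	};

	return dmaengine_slave_config(chan, &cfg);
}
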
1292 * ep93xx_dma_tx_status - check if a transaction is completed
1307 * ep93xx_dma_issue_pending - push pending transactions to the hardware
1320 struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev); in ep93xx_dma_probe()
1321 struct ep93xx_dma_engine *edma; in ep93xx_dma_probe() local
1325 edma = kzalloc(struct_size(edma, channels, pdata->num_channels), GFP_KERNEL); in ep93xx_dma_probe()
1326 if (!edma) in ep93xx_dma_probe()
1327 return -ENOMEM; in ep93xx_dma_probe()
1329 dma_dev = &edma->dma_dev; in ep93xx_dma_probe()
1330 edma->m2m = platform_get_device_id(pdev)->driver_data; in ep93xx_dma_probe()
1331 edma->num_channels = pdata->num_channels; in ep93xx_dma_probe()
1333 INIT_LIST_HEAD(&dma_dev->channels); in ep93xx_dma_probe()
1334 for (i = 0; i < pdata->num_channels; i++) { in ep93xx_dma_probe()
1335 const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i]; in ep93xx_dma_probe()
1336 struct ep93xx_dma_chan *edmac = &edma->channels[i]; in ep93xx_dma_probe()
1338 edmac->chan.device = dma_dev; in ep93xx_dma_probe()
1339 edmac->regs = cdata->base; in ep93xx_dma_probe()
1340 edmac->irq = cdata->irq; in ep93xx_dma_probe()
1341 edmac->edma = edma; in ep93xx_dma_probe()
1343 edmac->clk = clk_get(NULL, cdata->name); in ep93xx_dma_probe()
1344 if (IS_ERR(edmac->clk)) { in ep93xx_dma_probe()
1345 dev_warn(&pdev->dev, "failed to get clock for %s\n", in ep93xx_dma_probe()
1346 cdata->name); in ep93xx_dma_probe()
1350 spin_lock_init(&edmac->lock); in ep93xx_dma_probe()
1351 INIT_LIST_HEAD(&edmac->active); in ep93xx_dma_probe()
1352 INIT_LIST_HEAD(&edmac->queue); in ep93xx_dma_probe()
1353 INIT_LIST_HEAD(&edmac->free_list); in ep93xx_dma_probe()
1354 tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet); in ep93xx_dma_probe()
1356 list_add_tail(&edmac->chan.device_node, in ep93xx_dma_probe()
1357 &dma_dev->channels); in ep93xx_dma_probe()
1360 dma_cap_zero(dma_dev->cap_mask); in ep93xx_dma_probe()
1361 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); in ep93xx_dma_probe()
1362 dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask); in ep93xx_dma_probe()
1364 dma_dev->dev = &pdev->dev; in ep93xx_dma_probe()
1365 dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources; in ep93xx_dma_probe()
1366 dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; in ep93xx_dma_probe()
1367 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; in ep93xx_dma_probe()
1368 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; in ep93xx_dma_probe()
1369 dma_dev->device_config = ep93xx_dma_slave_config; in ep93xx_dma_probe()
1370 dma_dev->device_synchronize = ep93xx_dma_synchronize; in ep93xx_dma_probe()
1371 dma_dev->device_terminate_all = ep93xx_dma_terminate_all; in ep93xx_dma_probe()
1372 dma_dev->device_issue_pending = ep93xx_dma_issue_pending; in ep93xx_dma_probe()
1373 dma_dev->device_tx_status = ep93xx_dma_tx_status; in ep93xx_dma_probe()
1375 dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES); in ep93xx_dma_probe()
1377 if (edma->m2m) { in ep93xx_dma_probe()
1378 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); in ep93xx_dma_probe()
1379 dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy; in ep93xx_dma_probe()
1381 edma->hw_setup = m2m_hw_setup; in ep93xx_dma_probe()
1382 edma->hw_shutdown = m2m_hw_shutdown; in ep93xx_dma_probe()
1383 edma->hw_submit = m2m_hw_submit; in ep93xx_dma_probe()
1384 edma->hw_interrupt = m2m_hw_interrupt; in ep93xx_dma_probe()
1386 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); in ep93xx_dma_probe()
1388 edma->hw_synchronize = m2p_hw_synchronize; in ep93xx_dma_probe()
1389 edma->hw_setup = m2p_hw_setup; in ep93xx_dma_probe()
1390 edma->hw_shutdown = m2p_hw_shutdown; in ep93xx_dma_probe()
1391 edma->hw_submit = m2p_hw_submit; in ep93xx_dma_probe()
1392 edma->hw_interrupt = m2p_hw_interrupt; in ep93xx_dma_probe()
1397 for (i = 0; i < edma->num_channels; i++) { in ep93xx_dma_probe()
1398 struct ep93xx_dma_chan *edmac = &edma->channels[i]; in ep93xx_dma_probe()
1399 if (!IS_ERR_OR_NULL(edmac->clk)) in ep93xx_dma_probe()
1400 clk_put(edmac->clk); in ep93xx_dma_probe()
1402 kfree(edma); in ep93xx_dma_probe()
1404 dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", in ep93xx_dma_probe()
1405 edma->m2m ? "M" : "P"); in ep93xx_dma_probe()
1412 { "ep93xx-dma-m2p", 0 },
1413 { "ep93xx-dma-m2m", 1 },
1419 .name = "ep93xx-dma",