Lines Matching +full:memcpy +full:- +full:channels
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Ericsson AB 2007-2008
4 * Copyright (C) ST-Ericsson SA 2008-2010
5 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
6 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
9 #include <linux/dma-mapping.h>
32 * struct stedma40_platform_data - Configuration struct for the dma device.
36 * @disabled_channels: A vector, ending with -1, that marks physical channels
38 * @soft_lli_chans: A vector that marks the physical channels that will use LLI by SW
42 * @num_of_soft_lli_chans: The number of channels that need to be configured
45 * @num_of_memcpy_chans: The number of channels reserved for memcpy.
46 * @num_of_phy_chans: The number of physical channels implemented in HW.
47 * 0 means the number of channels is read from the DMA HW, but this is only
48 * valid for 'multiple of 4' channel counts, like 8.
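
The disabled_channels convention documented above (a vector terminated by -1; see the loop at line 3078 further down) is easy to illustrate with a standalone C sketch. The array contents here are hypothetical; the real vector comes from platform data or the device tree:

#include <stdio.h>

/* Hypothetical contents; the real vector comes from platform data or DT. */
static const int disabled_channels[] = { 2, 5, 7, -1 };

int main(void)
{
	int i;

	/* Same walk as the driver's d40_phy_res_init(): stop at -1. */
	for (i = 0; disabled_channels[i] != -1; i++)
		printf("physical channel %d is disabled\n",
		       disabled_channels[i]);
	return 0;
}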
61 #define D40_PHY_CHAN -1
80 /* Max number of logical channels per physical channel */
93 /* Reserved event lines for memcpy only. */
110 /* Default configuration for physical memcpy */
124 /* Default configuration for logical memcpy */
139 * enum d40_command - The different commands and/or statuses.
154 * enum d40_events - The different Event Enables for the event lines.
156 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
258 * struct d40_interrupt_lookup - lookup table for interrupt handler
303 * struct d40_reg_val - simple lookup struct
317 /* Interrupts on all logical channels */
335 /* Interrupts on all logical channels */
354 * struct d40_lli_pool - Structure for keeping LLIs in memory
373 * struct d40_desc - A descriptor is one DMA job.
378 * @lli_log: Same as above but for logical channels.
379 * @lli_pool: The pool with two entries pre-allocated.
410 * struct d40_lcla_pool - LCLA pool settings and data.
415 * This pointer is only there for clean-up on error.
416 * @pages: The number of pages needed for all physical channels.
417 * Only used later for clean-up on error
431 * struct d40_phy_res - struct for handling event lines mapped to physical
432 * channels.
456 * struct d40_chan - Struct that describes a channel.
516 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
548 * struct d40_base - The big global struct, one for each probed instance.
551 * @execmd_lock: Lock for execute command usage since several channels share
558 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
560 * @num_phy_chans: The number of physical channels. Read from HW. This
561 * is the number of available channels for this driver, not counting "Secure
562 * mode" allocated physical channels.
563 * @num_log_chans: The number of logical channels. Calculated from
565 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
566 * @dma_slave: dma_device channels that can only do slave transfers.
567 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
568 * @phy_chans: Room for all possible physical channels in system.
569 * @log_chans: Room for all possible logical channels in system.
577 * @phy_res: Vector containing all physical channels.
589 * @gcc_pwr_off_mask: Mask tracking which channels can be turned off.
613 /* Physical half channels */
630 return &d40c->chan.dev->device; in chan2dev()
635 return chan->log_num == D40_PHY_CHAN; in chan_is_physical()
645 return chan->base->virtbase + D40_DREG_PCBASE + in chan_base()
646 chan->phy_chan->num * D40_DREG_PCDELTA; in chan_base()
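
chan_base() above locates a channel's register block at a fixed stride from the controller base. A minimal userspace sketch of that address arithmetic follows; the constants are illustrative stand-ins, not the values of D40_DREG_PCBASE/D40_DREG_PCDELTA from the driver's headers:

#include <stdint.h>
#include <stdio.h>

#define PCBASE  0x400u  /* illustrative, stands in for D40_DREG_PCBASE */
#define PCDELTA 0x20u   /* illustrative, stands in for D40_DREG_PCDELTA */

static uintptr_t chan_regs(uintptr_t virtbase, unsigned int phy_num)
{
	/* one register block per physical channel, PCDELTA bytes apart */
	return virtbase + PCBASE + phy_num * PCDELTA;
}

int main(void)
{
	unsigned int c;

	for (c = 0; c < 4; c++)
		printf("chan %u regs at offset 0x%lx\n", c,
		       (unsigned long)chan_regs(0, c));
	return 0;
}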
672 base = d40d->lli_pool.pre_alloc_lli; in d40_pool_lli_alloc()
673 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); in d40_pool_lli_alloc()
674 d40d->lli_pool.base = NULL; in d40_pool_lli_alloc()
676 d40d->lli_pool.size = lli_len * 2 * align; in d40_pool_lli_alloc()
678 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); in d40_pool_lli_alloc()
679 d40d->lli_pool.base = base; in d40_pool_lli_alloc()
681 if (d40d->lli_pool.base == NULL) in d40_pool_lli_alloc()
682 return -ENOMEM; in d40_pool_lli_alloc()
686 d40d->lli_log.src = PTR_ALIGN(base, align); in d40_pool_lli_alloc()
687 d40d->lli_log.dst = d40d->lli_log.src + lli_len; in d40_pool_lli_alloc()
689 d40d->lli_pool.dma_addr = 0; in d40_pool_lli_alloc()
691 d40d->lli_phy.src = PTR_ALIGN(base, align); in d40_pool_lli_alloc()
692 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; in d40_pool_lli_alloc()
694 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, in d40_pool_lli_alloc()
695 d40d->lli_phy.src, in d40_pool_lli_alloc()
696 d40d->lli_pool.size, in d40_pool_lli_alloc()
699 if (dma_mapping_error(d40c->base->dev, in d40_pool_lli_alloc()
700 d40d->lli_pool.dma_addr)) { in d40_pool_lli_alloc()
701 kfree(d40d->lli_pool.base); in d40_pool_lli_alloc()
702 d40d->lli_pool.base = NULL; in d40_pool_lli_alloc()
703 d40d->lli_pool.dma_addr = 0; in d40_pool_lli_alloc()
704 return -ENOMEM; in d40_pool_lli_alloc()
713 if (d40d->lli_pool.dma_addr) in d40_pool_lli_free()
714 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, in d40_pool_lli_free()
715 d40d->lli_pool.size, DMA_TO_DEVICE); in d40_pool_lli_free()
717 kfree(d40d->lli_pool.base); in d40_pool_lli_free()
718 d40d->lli_pool.base = NULL; in d40_pool_lli_free()
719 d40d->lli_pool.size = 0; in d40_pool_lli_free()
720 d40d->lli_log.src = NULL; in d40_pool_lli_free()
721 d40d->lli_log.dst = NULL; in d40_pool_lli_free()
722 d40d->lli_phy.src = NULL; in d40_pool_lli_free()
723 d40d->lli_phy.dst = NULL; in d40_pool_lli_free()
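
d40_pool_lli_alloc()/d40_pool_lli_free() above implement a small-buffer optimization: short jobs reuse the descriptor's pre-allocated LLI area, longer ones get an over-allocated buffer that is aligned by hand (and, for physical channels, DMA-mapped). A standalone sketch of the pattern, with a hypothetical inline size and no DMA mapping:

#include <stdint.h>
#include <stdlib.h>

#define PRE_ALLOC_LLI 64   /* hypothetical inline capacity, in bytes */

struct lli_pool {
	void *base;        /* NULL while the inline area is in use */
	size_t size;
	char pre_alloc[PRE_ALLOC_LLI];
};

/* align must be a power of two, as with the driver's PTR_ALIGN() */
static void *pool_alloc(struct lli_pool *p, size_t need, size_t align)
{
	uintptr_t raw;

	if (need <= sizeof(p->pre_alloc)) {
		p->base = NULL;
		p->size = sizeof(p->pre_alloc);
		return p->pre_alloc;
	}

	p->size = need;
	p->base = malloc(need + align);   /* over-allocate for alignment */
	if (!p->base)
		return NULL;

	raw = (uintptr_t)p->base;
	return (void *)((raw + align - 1) & ~(uintptr_t)(align - 1));
}

static void pool_free(struct lli_pool *p)
{
	free(p->base);     /* free(NULL) is a no-op, like the inline case */
	p->base = NULL;
	p->size = 0;
}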
731 int ret = -EINVAL; in d40_lcla_alloc_one()
733 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); in d40_lcla_alloc_one()
740 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; in d40_lcla_alloc_one()
742 if (!d40c->base->lcla_pool.alloc_map[idx]) { in d40_lcla_alloc_one()
743 d40c->base->lcla_pool.alloc_map[idx] = d40d; in d40_lcla_alloc_one()
744 d40d->lcla_alloc++; in d40_lcla_alloc_one()
750 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); in d40_lcla_alloc_one()
760 int ret = -EINVAL; in d40_lcla_free_all()
765 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); in d40_lcla_free_all()
768 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; in d40_lcla_free_all()
770 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) { in d40_lcla_free_all()
771 d40c->base->lcla_pool.alloc_map[idx] = NULL; in d40_lcla_free_all()
772 d40d->lcla_alloc--; in d40_lcla_free_all()
773 if (d40d->lcla_alloc == 0) { in d40_lcla_free_all()
780 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); in d40_lcla_free_all()
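
The two functions above claim and release LCLA link slots in a flat alloc_map, indexed as phy_chan * D40_LCLA_LINK_PER_EVENT_GRP + i and guarded by the pool lock. A compact userspace analogue, with a pthread mutex standing in for the spinlock and hypothetical slot/channel counts:

#include <pthread.h>
#include <stddef.h>

#define LINKS_PER_GRP 2    /* hypothetical slots per channel */
#define NUM_PHY_CHANS 8    /* hypothetical channel count */

static void *alloc_map[NUM_PHY_CHANS * LINKS_PER_GRP];
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* returns the claimed slot index, or -1 if the group is full */
static int lcla_alloc_one(int phy_num, void *owner)
{
	int i, ret = -1;

	pthread_mutex_lock(&pool_lock);
	for (i = 0; i < LINKS_PER_GRP; i++) {
		int idx = phy_num * LINKS_PER_GRP + i;

		if (!alloc_map[idx]) {
			alloc_map[idx] = owner;
			ret = i;
			break;
		}
	}
	pthread_mutex_unlock(&pool_lock);
	return ret;
}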
788 list_del(&d40d->node); in d40_desc_remove()
795 if (!list_empty(&d40c->client)) { in d40_desc_get()
799 list_for_each_entry_safe(d, _d, &d40c->client, node) { in d40_desc_get()
800 if (async_tx_test_ack(&d->txd)) { in d40_desc_get()
810 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT); in d40_desc_get()
813 INIT_LIST_HEAD(&desc->node); in d40_desc_get()
823 kmem_cache_free(d40c->base->desc_slab, d40d); in d40_desc_free()
828 list_add_tail(&desc->node, &d40c->active); in d40_desc_submit()
833 struct d40_phy_lli *lli_dst = desc->lli_phy.dst; in d40_phy_lli_load()
834 struct d40_phy_lli *lli_src = desc->lli_phy.src; in d40_phy_lli_load()
837 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); in d40_phy_lli_load()
838 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); in d40_phy_lli_load()
839 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); in d40_phy_lli_load()
840 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); in d40_phy_lli_load()
842 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); in d40_phy_lli_load()
843 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); in d40_phy_lli_load()
844 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); in d40_phy_lli_load()
845 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); in d40_phy_lli_load()
850 list_add_tail(&desc->node, &d40c->done); in d40_desc_done()
855 struct d40_lcla_pool *pool = &chan->base->lcla_pool; in d40_log_lli_to_lcxa()
856 struct d40_log_lli_bidir *lli = &desc->lli_log; in d40_log_lli_to_lcxa()
857 int lli_current = desc->lli_current; in d40_log_lli_to_lcxa()
858 int lli_len = desc->lli_len; in d40_log_lli_to_lcxa()
859 bool cyclic = desc->cyclic; in d40_log_lli_to_lcxa()
860 int curr_lcla = -EINVAL; in d40_log_lli_to_lcxa()
862 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; in d40_log_lli_to_lcxa()
875 if (linkback || (lli_len - lli_current > 1)) { in d40_log_lli_to_lcxa()
882 if (!(chan->phy_chan->use_soft_lli && in d40_log_lli_to_lcxa()
883 chan->dma_cfg.dir == DMA_DEV_TO_MEM)) in d40_log_lli_to_lcxa()
895 if (!linkback || curr_lcla == -EINVAL) { in d40_log_lli_to_lcxa()
898 if (curr_lcla == -EINVAL) in d40_log_lli_to_lcxa()
901 d40_log_lli_lcpa_write(chan->lcpa, in d40_log_lli_to_lcxa()
902 &lli->dst[lli_current], in d40_log_lli_to_lcxa()
903 &lli->src[lli_current], in d40_log_lli_to_lcxa()
913 unsigned int lcla_offset = chan->phy_chan->num * 1024 + in d40_log_lli_to_lcxa()
915 struct d40_log_lli *lcla = pool->base + lcla_offset; in d40_log_lli_to_lcxa()
922 next_lcla = linkback ? first_lcla : -EINVAL; in d40_log_lli_to_lcxa()
924 if (cyclic || next_lcla == -EINVAL) in d40_log_lli_to_lcxa()
929 d40_log_lli_lcpa_write(chan->lcpa, in d40_log_lli_to_lcxa()
930 &lli->dst[lli_current], in d40_log_lli_to_lcxa()
931 &lli->src[lli_current], in d40_log_lli_to_lcxa()
940 &lli->dst[lli_current], in d40_log_lli_to_lcxa()
941 &lli->src[lli_current], in d40_log_lli_to_lcxa()
949 dma_sync_single_range_for_device(chan->base->dev, in d40_log_lli_to_lcxa()
950 pool->dma_addr, lcla_offset, in d40_log_lli_to_lcxa()
956 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { in d40_log_lli_to_lcxa()
962 desc->lli_current = lli_current; in d40_log_lli_to_lcxa()
969 d40d->lli_current = d40d->lli_len; in d40_desc_load()
976 return list_first_entry_or_null(&d40c->active, struct d40_desc, node); in d40_first_active_get()
983 desc->is_in_client_list = false; in d40_desc_queue()
984 list_add_tail(&desc->node, &d40c->pending_queue); in d40_desc_queue()
989 return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc, in d40_first_pending()
995 return list_first_entry_or_null(&d40c->queue, struct d40_desc, node); in d40_first_queued()
1000 return list_first_entry_or_null(&d40c->done, struct d40_desc, node); in d40_first_done()
1030 seg_max -= max_w; in d40_size_2_dmalen()
1033 return -EINVAL; in d40_size_2_dmalen()
1079 spin_lock_irqsave(&d40c->base->execmd_lock, flags); in __d40_execute_command_phy()
1081 if (d40c->phy_chan->num % 2 == 0) in __d40_execute_command_phy()
1082 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; in __d40_execute_command_phy()
1084 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; in __d40_execute_command_phy()
1088 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> in __d40_execute_command_phy()
1089 D40_CHAN_POS(d40c->phy_chan->num); in __d40_execute_command_phy()
1095 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); in __d40_execute_command_phy()
1096 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)), in __d40_execute_command_phy()
1103 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> in __d40_execute_command_phy()
1104 D40_CHAN_POS(d40c->phy_chan->num); in __d40_execute_command_phy()
1121 d40c->phy_chan->num, d40c->log_num, in __d40_execute_command_phy()
1124 ret = -EBUSY; in __d40_execute_command_phy()
1129 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); in __d40_execute_command_phy()
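
The command path above splits channel status across two registers (ACTIVE for even-numbered channels, ACTIVO for odd) and packs one 2-bit field per channel, with D40_CHAN_POS()/D40_CHAN_POS_MASK() picking the field. A sketch of that read-modify-write, assuming the 2-bit layout; the macro definitions here are illustrative, not copied from the driver:

#include <stdint.h>
#include <stdio.h>

/* illustrative layout: even/odd chans share a bit position in their reg */
#define CHAN_POS(c)      (2u * ((unsigned)(c) / 2 % 16))
#define CHAN_POS_MASK(c) (0x3u << CHAN_POS(c))

static unsigned int chan_status(uint32_t reg, int chan)
{
	return (reg & CHAN_POS_MASK(chan)) >> CHAN_POS(chan);
}

static uint32_t chan_write_cmd(uint32_t reg, int chan, unsigned int cmd)
{
	/* keep every other channel's field, as the driver's wmask does */
	return (reg & ~CHAN_POS_MASK(chan)) | (cmd << CHAN_POS(chan));
}

int main(void)
{
	uint32_t reg = 0;

	reg = chan_write_cmd(reg, 4, 0x1);     /* e.g. a RUN on channel 4 */
	printf("chan 4 status: %u\n", chan_status(reg, 4));
	return 0;
}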
1163 if (!list_empty(&d40c->client)) in d40_term_all()
1164 list_for_each_entry_safe(d40d, _d, &d40c->client, node) { in d40_term_all()
1170 if (!list_empty(&d40c->prepare_queue)) in d40_term_all()
1172 &d40c->prepare_queue, node) { in d40_term_all()
1177 d40c->pending_tx = 0; in d40_term_all()
1226 "status %x\n", d40c->phy_chan->num, in __d40_config_set_event()
1227 d40c->log_num, status); in __d40_config_set_event()
1238 while (--tries) { in __d40_config_set_event()
1251 100 - tries); in __d40_config_set_event()
1266 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); in d40_config_set_event()
1268 /* Enable event line connected to device (or memcpy) */ in d40_config_set_event()
1269 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || in d40_config_set_event()
1270 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) in d40_config_set_event()
1274 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM) in d40_config_set_event()
1298 if (d40c->phy_chan->num % 2 == 0) in __d40_execute_command_log()
1299 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; in __d40_execute_command_log()
1301 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; in __d40_execute_command_log()
1304 spin_lock_irqsave(&d40c->phy_chan->lock, flags); in __d40_execute_command_log()
1311 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> in __d40_execute_command_log()
1312 D40_CHAN_POS(d40c->phy_chan->num); in __d40_execute_command_log()
1335 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); in __d40_execute_command_log()
1368 return phy_map[d40c->dma_cfg.mode_opt]; in d40_get_prmo()
1370 return log_map[d40c->dma_cfg.mode_opt]; in d40_get_prmo()
1379 addr_base = (d40c->phy_chan->num % 2) * 4; in d40_config_write()
1382 D40_CHAN_POS(d40c->phy_chan->num); in d40_config_write()
1383 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); in d40_config_write()
1386 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num); in d40_config_write()
1388 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); in d40_config_write()
1391 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) in d40_config_write()
1396 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); in d40_config_write()
1397 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); in d40_config_write()
1414 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) in d40_residue()
1422 return num_elt * d40c->dma_cfg.dst_info.data_width; in d40_residue()
1430 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; in d40_tx_is_linked()
1444 if (d40c->phy_chan == NULL) { in d40_pause()
1446 return -EINVAL; in d40_pause()
1449 if (!d40c->busy) in d40_pause()
1452 spin_lock_irqsave(&d40c->lock, flags); in d40_pause()
1453 pm_runtime_get_sync(d40c->base->dev); in d40_pause()
1457 pm_runtime_mark_last_busy(d40c->base->dev); in d40_pause()
1458 pm_runtime_put_autosuspend(d40c->base->dev); in d40_pause()
1459 spin_unlock_irqrestore(&d40c->lock, flags); in d40_pause()
1469 if (d40c->phy_chan == NULL) { in d40_resume()
1471 return -EINVAL; in d40_resume()
1474 if (!d40c->busy) in d40_resume()
1477 spin_lock_irqsave(&d40c->lock, flags); in d40_resume()
1478 pm_runtime_get_sync(d40c->base->dev); in d40_resume()
1484 pm_runtime_mark_last_busy(d40c->base->dev); in d40_resume()
1485 pm_runtime_put_autosuspend(d40c->base->dev); in d40_resume()
1486 spin_unlock_irqrestore(&d40c->lock, flags); in d40_resume()
1492 struct d40_chan *d40c = container_of(tx->chan, in d40_tx_submit()
1499 spin_lock_irqsave(&d40c->lock, flags); in d40_tx_submit()
1502 spin_unlock_irqrestore(&d40c->lock, flags); in d40_tx_submit()
1521 if (!d40c->busy) { in d40_queue_start()
1522 d40c->busy = true; in d40_queue_start()
1523 pm_runtime_get_sync(d40c->base->dev); in d40_queue_start()
1556 if (d40d->cyclic) { in dma_tc_handle()
1563 if (d40d->lli_current < d40d->lli_len in dma_tc_handle()
1570 if (d40d->lli_current == d40d->lli_len) in dma_tc_handle()
1571 d40d->lli_current = 0; in dma_tc_handle()
1576 if (d40d->lli_current < d40d->lli_len) { in dma_tc_handle()
1584 d40c->busy = false; in dma_tc_handle()
1586 pm_runtime_mark_last_busy(d40c->base->dev); in dma_tc_handle()
1587 pm_runtime_put_autosuspend(d40c->base->dev); in dma_tc_handle()
1594 d40c->pending_tx++; in dma_tc_handle()
1595 tasklet_schedule(&d40c->tasklet); in dma_tc_handle()
1607 spin_lock_irqsave(&d40c->lock, flags); in dma_tasklet()
1614 if (d40d == NULL || !d40d->cyclic) in dma_tasklet()
1618 if (!d40d->cyclic) in dma_tasklet()
1619 dma_cookie_complete(&d40d->txd); in dma_tasklet()
1625 if (d40c->pending_tx == 0) { in dma_tasklet()
1626 spin_unlock_irqrestore(&d40c->lock, flags); in dma_tasklet()
1631 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); in dma_tasklet()
1632 dmaengine_desc_get_callback(&d40d->txd, &cb); in dma_tasklet()
1634 if (!d40d->cyclic) { in dma_tasklet()
1635 if (async_tx_test_ack(&d40d->txd)) { in dma_tasklet()
1638 } else if (!d40d->is_in_client_list) { in dma_tasklet()
1641 list_add_tail(&d40d->node, &d40c->client); in dma_tasklet()
1642 d40d->is_in_client_list = true; in dma_tasklet()
1646 d40c->pending_tx--; in dma_tasklet()
1648 if (d40c->pending_tx) in dma_tasklet()
1649 tasklet_schedule(&d40c->tasklet); in dma_tasklet()
1651 spin_unlock_irqrestore(&d40c->lock, flags); in dma_tasklet()
1659 if (d40c->pending_tx > 0) in dma_tasklet()
1660 d40c->pending_tx--; in dma_tasklet()
1661 spin_unlock_irqrestore(&d40c->lock, flags); in dma_tasklet()
1669 long chan = -1; in d40_handle_interrupt()
1672 u32 *regs = base->regs_interrupt; in d40_handle_interrupt()
1673 struct d40_interrupt_lookup *il = base->gen_dmac.il; in d40_handle_interrupt()
1674 u32 il_size = base->gen_dmac.il_size; in d40_handle_interrupt()
1676 spin_lock(&base->interrupt_lock); in d40_handle_interrupt()
1678 /* Read interrupt status of both logical and physical channels */ in d40_handle_interrupt()
1680 regs[i] = readl(base->virtbase + il[i].src); in d40_handle_interrupt()
1692 idx = chan & (BITS_PER_LONG - 1); in d40_handle_interrupt()
1695 d40c = base->lookup_phy_chans[idx]; in d40_handle_interrupt()
1697 d40c = base->lookup_log_chans[il[row].offset + idx]; in d40_handle_interrupt()
1708 writel(BIT(idx), base->virtbase + il[row].clr); in d40_handle_interrupt()
1710 spin_lock(&d40c->lock); in d40_handle_interrupt()
1715 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", in d40_handle_interrupt()
1718 spin_unlock(&d40c->lock); in d40_handle_interrupt()
1721 spin_unlock(&base->interrupt_lock); in d40_handle_interrupt()
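
d40_handle_interrupt() above latches every status register into regs[] and then walks them as one flat bitmap: chan / BITS_PER_LONG selects the row (which register), chan & (BITS_PER_LONG - 1) the bit within it. The driver scans with find_next_bit(); a plain linear scan over 32-bit words shows the same indexing:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 32

static void dispatch_pending(const uint32_t *regs, int nregs)
{
	int chan;

	for (chan = 0; chan < nregs * BITS_PER_WORD; chan++) {
		int row = chan / BITS_PER_WORD;
		int idx = chan & (BITS_PER_WORD - 1);

		if (regs[row] & (1u << idx))
			printf("pending: row %d bit %d (chan %d)\n",
			       row, idx, chan);
	}
}

int main(void)
{
	uint32_t regs[2] = { 0x5, 0x80000000u };  /* hypothetical status */

	dispatch_pending(regs, 2);
	return 0;
}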
1730 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; in d40_validate_conf()
1732 if (!conf->dir) { in d40_validate_conf()
1734 res = -EINVAL; in d40_validate_conf()
1737 if ((is_log && conf->dev_type > d40c->base->num_log_chans) || in d40_validate_conf()
1738 (!is_log && conf->dev_type > d40c->base->num_phy_chans) || in d40_validate_conf()
1739 (conf->dev_type < 0)) { in d40_validate_conf()
1740 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type); in d40_validate_conf()
1741 res = -EINVAL; in d40_validate_conf()
1744 if (conf->dir == DMA_DEV_TO_DEV) { in d40_validate_conf()
1750 res = -EINVAL; in d40_validate_conf()
1753 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * in d40_validate_conf()
1754 conf->src_info.data_width != in d40_validate_conf()
1755 d40_psize_2_burst_size(is_log, conf->dst_info.psize) * in d40_validate_conf()
1756 conf->dst_info.data_width) { in d40_validate_conf()
1763 res = -EINVAL; in d40_validate_conf()
1774 spin_lock_irqsave(&phy->lock, flags); in d40_alloc_mask_set()
1776 *first_user = ((phy->allocated_src | phy->allocated_dst) in d40_alloc_mask_set()
1781 if (phy->allocated_src == D40_ALLOC_FREE && in d40_alloc_mask_set()
1782 phy->allocated_dst == D40_ALLOC_FREE) { in d40_alloc_mask_set()
1783 phy->allocated_dst = D40_ALLOC_PHY; in d40_alloc_mask_set()
1784 phy->allocated_src = D40_ALLOC_PHY; in d40_alloc_mask_set()
1792 if (phy->allocated_src == D40_ALLOC_PHY) in d40_alloc_mask_set()
1795 if (phy->allocated_src == D40_ALLOC_FREE) in d40_alloc_mask_set()
1796 phy->allocated_src = D40_ALLOC_LOG_FREE; in d40_alloc_mask_set()
1798 if (!(phy->allocated_src & BIT(log_event_line))) { in d40_alloc_mask_set()
1799 phy->allocated_src |= BIT(log_event_line); in d40_alloc_mask_set()
1804 if (phy->allocated_dst == D40_ALLOC_PHY) in d40_alloc_mask_set()
1807 if (phy->allocated_dst == D40_ALLOC_FREE) in d40_alloc_mask_set()
1808 phy->allocated_dst = D40_ALLOC_LOG_FREE; in d40_alloc_mask_set()
1810 if (!(phy->allocated_dst & BIT(log_event_line))) { in d40_alloc_mask_set()
1811 phy->allocated_dst |= BIT(log_event_line); in d40_alloc_mask_set()
1816 spin_unlock_irqrestore(&phy->lock, flags); in d40_alloc_mask_set()
1819 spin_unlock_irqrestore(&phy->lock, flags); in d40_alloc_mask_set()
1829 spin_lock_irqsave(&phy->lock, flags); in d40_alloc_mask_free()
1831 phy->allocated_dst = D40_ALLOC_FREE; in d40_alloc_mask_free()
1832 phy->allocated_src = D40_ALLOC_FREE; in d40_alloc_mask_free()
1839 phy->allocated_src &= ~BIT(log_event_line); in d40_alloc_mask_free()
1840 if (phy->allocated_src == D40_ALLOC_LOG_FREE) in d40_alloc_mask_free()
1841 phy->allocated_src = D40_ALLOC_FREE; in d40_alloc_mask_free()
1843 phy->allocated_dst &= ~BIT(log_event_line); in d40_alloc_mask_free()
1844 if (phy->allocated_dst == D40_ALLOC_LOG_FREE) in d40_alloc_mask_free()
1845 phy->allocated_dst = D40_ALLOC_FREE; in d40_alloc_mask_free()
1848 is_free = ((phy->allocated_src | phy->allocated_dst) == in d40_alloc_mask_free()
1851 spin_unlock_irqrestore(&phy->lock, flags); in d40_alloc_mask_free()
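
d40_alloc_mask_set()/d40_alloc_mask_free() above encode two ownership modes per half-channel: a physical client takes the whole side (D40_ALLOC_PHY), while logical clients set one bit per event line, and the side drops back to free when the last bit clears. A sketch under assumed sentinel values; the driver's actual D40_ALLOC_* encodings may differ:

#include <stdbool.h>
#include <stdint.h>

/* assumed sentinels; chosen so they cannot collide with event-line bits */
#define ALLOC_FREE     0x80000000u
#define ALLOC_PHY      0x40000000u
#define ALLOC_LOG_FREE 0x0u

static bool claim_log_event(uint32_t *side, unsigned int event_line)
{
	if (*side == ALLOC_PHY)
		return false;               /* whole side already taken */
	if (*side == ALLOC_FREE)
		*side = ALLOC_LOG_FREE;     /* switch side to logical mode */
	if (*side & (1u << event_line))
		return false;               /* event line already in use */
	*side |= 1u << event_line;
	return true;
}

static void release_log_event(uint32_t *side, unsigned int event_line)
{
	*side &= ~(1u << event_line);
	if (*side == ALLOC_LOG_FREE)
		*side = ALLOC_FREE;         /* last user gone: side is free */
}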
1858 int dev_type = d40c->dma_cfg.dev_type; in d40_allocate_channel()
1867 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; in d40_allocate_channel()
1869 phys = d40c->base->phy_res; in d40_allocate_channel()
1870 num_phy_chans = d40c->base->num_phy_chans; in d40_allocate_channel()
1872 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { in d40_allocate_channel()
1875 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || in d40_allocate_channel()
1876 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { in d40_allocate_channel()
1877 /* dst event lines are used for logical memcpy */ in d40_allocate_channel()
1881 return -EINVAL; in d40_allocate_channel()
1887 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { in d40_allocate_channel()
1889 if (d40c->dma_cfg.use_fixed_channel) { in d40_allocate_channel()
1890 i = d40c->dma_cfg.phy_channel; in d40_allocate_channel()
1904 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { in d40_allocate_channel()
1915 return -EINVAL; in d40_allocate_channel()
1917 d40c->phy_chan = &phys[i]; in d40_allocate_channel()
1918 d40c->log_num = D40_PHY_CHAN; in d40_allocate_channel()
1921 if (dev_type == -1) in d40_allocate_channel()
1922 return -EINVAL; in d40_allocate_channel()
1925 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { in d40_allocate_channel()
1928 if (d40c->dma_cfg.use_fixed_channel) { in d40_allocate_channel()
1929 i = d40c->dma_cfg.phy_channel; in d40_allocate_channel()
1934 return -EINVAL; in d40_allocate_channel()
1943 return -EINVAL; in d40_allocate_channel()
1947 * Spread logical channels across all available physical rather in d40_allocate_channel()
1949 * channels. in d40_allocate_channel()
1959 for (i = phy_num + 1; i >= phy_num; i--) { in d40_allocate_channel()
1967 return -EINVAL; in d40_allocate_channel()
1970 d40c->phy_chan = &phys[i]; in d40_allocate_channel()
1971 d40c->log_num = log_num; in d40_allocate_channel()
1975 d40c->base->lookup_log_chans[d40c->log_num] = d40c; in d40_allocate_channel()
1977 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; in d40_allocate_channel()
1985 dma_cap_mask_t cap = d40c->chan.device->cap_mask; in d40_config_memcpy()
1988 d40c->dma_cfg = dma40_memcpy_conf_log; in d40_config_memcpy()
1989 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id]; in d40_config_memcpy()
1991 d40_log_cfg(&d40c->dma_cfg, in d40_config_memcpy()
1992 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); in d40_config_memcpy()
1996 d40c->dma_cfg = dma40_memcpy_conf_phy; in d40_config_memcpy()
1999 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS); in d40_config_memcpy()
2002 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); in d40_config_memcpy()
2003 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); in d40_config_memcpy()
2006 chan_err(d40c, "No memcpy\n"); in d40_config_memcpy()
2007 return -EINVAL; in d40_config_memcpy()
2017 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); in d40_free_dma()
2018 struct d40_phy_res *phy = d40c->phy_chan; in d40_free_dma()
2026 return -EINVAL; in d40_free_dma()
2029 if (phy->allocated_src == D40_ALLOC_FREE && in d40_free_dma()
2030 phy->allocated_dst == D40_ALLOC_FREE) { in d40_free_dma()
2032 return -EINVAL; in d40_free_dma()
2035 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || in d40_free_dma()
2036 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) in d40_free_dma()
2038 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) in d40_free_dma()
2042 return -EINVAL; in d40_free_dma()
2045 pm_runtime_get_sync(d40c->base->dev); in d40_free_dma()
2055 d40c->base->lookup_log_chans[d40c->log_num] = NULL; in d40_free_dma()
2057 d40c->base->lookup_phy_chans[phy->num] = NULL; in d40_free_dma()
2059 if (d40c->busy) { in d40_free_dma()
2060 pm_runtime_mark_last_busy(d40c->base->dev); in d40_free_dma()
2061 pm_runtime_put_autosuspend(d40c->base->dev); in d40_free_dma()
2064 d40c->busy = false; in d40_free_dma()
2065 d40c->phy_chan = NULL; in d40_free_dma()
2066 d40c->configured = false; in d40_free_dma()
2068 pm_runtime_mark_last_busy(d40c->base->dev); in d40_free_dma()
2069 pm_runtime_put_autosuspend(d40c->base->dev); in d40_free_dma()
2080 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); in d40_is_paused()
2082 spin_lock_irqsave(&d40c->lock, flags); in d40_is_paused()
2085 if (d40c->phy_chan->num % 2 == 0) in d40_is_paused()
2086 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; in d40_is_paused()
2088 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; in d40_is_paused()
2091 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> in d40_is_paused()
2092 D40_CHAN_POS(d40c->phy_chan->num); in d40_is_paused()
2098 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || in d40_is_paused()
2099 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { in d40_is_paused()
2101 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { in d40_is_paused()
2114 spin_unlock_irqrestore(&d40c->lock, flags); in d40_is_paused()
2126 spin_lock_irqsave(&d40c->lock, flags); in stedma40_residue()
2128 spin_unlock_irqrestore(&d40c->lock, flags); in stedma40_residue()
2139 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; in d40_prep_sg_log()
2140 struct stedma40_half_channel_info *src_info = &cfg->src_info; in d40_prep_sg_log()
2141 struct stedma40_half_channel_info *dst_info = &cfg->dst_info; in d40_prep_sg_log()
2146 desc->lli_log.src, in d40_prep_sg_log()
2147 chan->log_def.lcsp1, in d40_prep_sg_log()
2148 src_info->data_width, in d40_prep_sg_log()
2149 dst_info->data_width); in d40_prep_sg_log()
2153 desc->lli_log.dst, in d40_prep_sg_log()
2154 chan->log_def.lcsp3, in d40_prep_sg_log()
2155 dst_info->data_width, in d40_prep_sg_log()
2156 src_info->data_width); in d40_prep_sg_log()
2167 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; in d40_prep_sg_phy()
2168 struct stedma40_half_channel_info *src_info = &cfg->src_info; in d40_prep_sg_phy()
2169 struct stedma40_half_channel_info *dst_info = &cfg->dst_info; in d40_prep_sg_phy()
2173 if (desc->cyclic) in d40_prep_sg_phy()
2177 desc->lli_phy.src, in d40_prep_sg_phy()
2178 virt_to_phys(desc->lli_phy.src), in d40_prep_sg_phy()
2179 chan->src_def_cfg, in d40_prep_sg_phy()
2183 desc->lli_phy.dst, in d40_prep_sg_phy()
2184 virt_to_phys(desc->lli_phy.dst), in d40_prep_sg_phy()
2185 chan->dst_def_cfg, in d40_prep_sg_phy()
2188 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, in d40_prep_sg_phy()
2189 desc->lli_pool.size, DMA_TO_DEVICE); in d40_prep_sg_phy()
2206 cfg = &chan->dma_cfg; in d40_prep_desc()
2207 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, in d40_prep_desc()
2208 cfg->dst_info.data_width); in d40_prep_desc()
2209 if (desc->lli_len < 0) { in d40_prep_desc()
2214 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); in d40_prep_desc()
2220 desc->lli_current = 0; in d40_prep_desc()
2221 desc->txd.flags = dma_flags; in d40_prep_desc()
2222 desc->txd.tx_submit = d40_tx_submit; in d40_prep_desc()
2224 dma_async_tx_descriptor_init(&desc->txd, &chan->chan); in d40_prep_desc()
2244 if (!chan->phy_chan) { in d40_prep_sg()
2249 d40_set_runtime_config_write(dchan, &chan->slave_config, direction); in d40_prep_sg()
2251 spin_lock_irqsave(&chan->lock, flags); in d40_prep_sg()
2257 if (sg_next(&sg_src[sg_len - 1]) == sg_src) in d40_prep_sg()
2258 desc->cyclic = true; in d40_prep_sg()
2263 src_dev_addr = chan->runtime_addr; in d40_prep_sg()
2265 dst_dev_addr = chan->runtime_addr; in d40_prep_sg()
2284 list_add_tail(&desc->node, &chan->prepare_queue); in d40_prep_sg()
2286 spin_unlock_irqrestore(&chan->lock, flags); in d40_prep_sg()
2288 return &desc->txd; in d40_prep_sg()
2292 spin_unlock_irqrestore(&chan->lock, flags); in d40_prep_sg()
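
The test at line 2257 above treats a scatterlist whose last entry links back to the first as a cyclic transfer. A minimal analogue with a hand-rolled sg-like node (not the kernel's struct scatterlist):

#include <stdbool.h>
#include <stddef.h>

struct sg_node {
	struct sg_node *next;
	size_t length;
};

/* cyclic iff the tail points back at the head, as in d40_prep_sg() */
static bool sg_is_cyclic(struct sg_node *sg, int sg_len)
{
	return sg[sg_len - 1].next == sg;
}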
2306 d40c->dma_cfg = *info; in stedma40_filter()
2311 d40c->configured = true; in stedma40_filter()
2318 bool realtime = d40c->dma_cfg.realtime; in __d40_set_prio_rt()
2319 bool highprio = d40c->dma_cfg.high_priority; in __d40_set_prio_rt()
2325 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; in __d40_set_prio_rt()
2327 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear; in __d40_set_prio_rt()
2334 * destination event lines that trigger logical channels. in __d40_set_prio_rt()
2339 prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear; in __d40_set_prio_rt()
2345 writel(bit, d40c->base->virtbase + prioreg + group * 4); in __d40_set_prio_rt()
2346 writel(bit, d40c->base->virtbase + rtreg + group * 4); in __d40_set_prio_rt()
2351 if (d40c->base->rev < 3) in d40_set_prio_realtime()
2354 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || in d40_set_prio_realtime()
2355 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) in d40_set_prio_realtime()
2356 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true); in d40_set_prio_realtime()
2358 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) || in d40_set_prio_realtime()
2359 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) in d40_set_prio_realtime()
2360 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false); in d40_set_prio_realtime()
2381 cfg.dev_type = dma_spec->args[0]; in d40_xlate()
2382 flags = dma_spec->args[2]; in d40_xlate()
2401 cfg.phy_channel = dma_spec->args[1]; in d40_xlate()
2419 spin_lock_irqsave(&d40c->lock, flags); in d40_alloc_chan_resources()
2423 /* If no dma configuration is set, use the default configuration (memcpy) */ in d40_alloc_chan_resources()
2424 if (!d40c->configured) { in d40_alloc_chan_resources()
2427 chan_err(d40c, "Failed to configure memcpy channel\n"); in d40_alloc_chan_resources()
2435 d40c->configured = false; in d40_alloc_chan_resources()
2439 pm_runtime_get_sync(d40c->base->dev); in d40_alloc_chan_resources()
2444 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) in d40_alloc_chan_resources()
2445 d40c->lcpa = d40c->base->lcpa_base + in d40_alloc_chan_resources()
2446 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE; in d40_alloc_chan_resources()
2448 d40c->lcpa = d40c->base->lcpa_base + in d40_alloc_chan_resources()
2449 d40c->dma_cfg.dev_type * in d40_alloc_chan_resources()
2453 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); in d40_alloc_chan_resources()
2454 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); in d40_alloc_chan_resources()
2459 d40c->phy_chan->num, in d40_alloc_chan_resources()
2460 d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); in d40_alloc_chan_resources()
2465 * resource is free. In case of multiple logical channels in d40_alloc_chan_resources()
2471 pm_runtime_mark_last_busy(d40c->base->dev); in d40_alloc_chan_resources()
2472 pm_runtime_put_autosuspend(d40c->base->dev); in d40_alloc_chan_resources()
2473 spin_unlock_irqrestore(&d40c->lock, flags); in d40_alloc_chan_resources()
2484 if (d40c->phy_chan == NULL) { in d40_free_chan_resources()
2489 spin_lock_irqsave(&d40c->lock, flags); in d40_free_chan_resources()
2495 spin_unlock_irqrestore(&d40c->lock, flags); in d40_free_chan_resources()
2568 if (d40c->phy_chan == NULL) { in d40_tx_status()
2570 return -EINVAL; in d40_tx_status()
2588 if (d40c->phy_chan == NULL) { in d40_issue_pending()
2593 spin_lock_irqsave(&d40c->lock, flags); in d40_issue_pending()
2595 list_splice_tail_init(&d40c->pending_queue, &d40c->queue); in d40_issue_pending()
2598 if (!d40c->busy) in d40_issue_pending()
2601 spin_unlock_irqrestore(&d40c->lock, flags); in d40_issue_pending()
2610 if (d40c->phy_chan == NULL) { in d40_terminate_all()
2612 return -EINVAL; in d40_terminate_all()
2615 spin_lock_irqsave(&d40c->lock, flags); in d40_terminate_all()
2617 pm_runtime_get_sync(d40c->base->dev); in d40_terminate_all()
2623 pm_runtime_mark_last_busy(d40c->base->dev); in d40_terminate_all()
2624 pm_runtime_put_autosuspend(d40c->base->dev); in d40_terminate_all()
2625 if (d40c->busy) { in d40_terminate_all()
2626 pm_runtime_mark_last_busy(d40c->base->dev); in d40_terminate_all()
2627 pm_runtime_put_autosuspend(d40c->base->dev); in d40_terminate_all()
2629 d40c->busy = false; in d40_terminate_all()
2631 spin_unlock_irqrestore(&d40c->lock, flags); in d40_terminate_all()
2662 info->psize = psize; in dma40_config_to_halfchannel()
2663 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; in dma40_config_to_halfchannel()
2673 memcpy(&d40c->slave_config, config, sizeof(*config)); in d40_set_runtime_config()
2684 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; in d40_set_runtime_config_write()
2690 if (d40c->phy_chan == NULL) { in d40_set_runtime_config_write()
2692 return -EINVAL; in d40_set_runtime_config_write()
2695 src_addr_width = config->src_addr_width; in d40_set_runtime_config_write()
2696 src_maxburst = config->src_maxburst; in d40_set_runtime_config_write()
2697 dst_addr_width = config->dst_addr_width; in d40_set_runtime_config_write()
2698 dst_maxburst = config->dst_maxburst; in d40_set_runtime_config_write()
2701 config_addr = config->src_addr; in d40_set_runtime_config_write()
2703 if (cfg->dir != DMA_DEV_TO_MEM) in d40_set_runtime_config_write()
2704 dev_dbg(d40c->base->dev, in d40_set_runtime_config_write()
2707 cfg->dir); in d40_set_runtime_config_write()
2708 cfg->dir = DMA_DEV_TO_MEM; in d40_set_runtime_config_write()
2717 config_addr = config->dst_addr; in d40_set_runtime_config_write()
2719 if (cfg->dir != DMA_MEM_TO_DEV) in d40_set_runtime_config_write()
2720 dev_dbg(d40c->base->dev, in d40_set_runtime_config_write()
2723 cfg->dir); in d40_set_runtime_config_write()
2724 cfg->dir = DMA_MEM_TO_DEV; in d40_set_runtime_config_write()
2732 dev_err(d40c->base->dev, in d40_set_runtime_config_write()
2735 return -EINVAL; in d40_set_runtime_config_write()
2739 dev_err(d40c->base->dev, "no address supplied\n"); in d40_set_runtime_config_write()
2740 return -EINVAL; in d40_set_runtime_config_write()
2744 dev_err(d40c->base->dev, in d40_set_runtime_config_write()
2750 return -EINVAL; in d40_set_runtime_config_write()
2768 return -EINVAL; in d40_set_runtime_config_write()
2770 cfg->src_info.data_width = src_addr_width; in d40_set_runtime_config_write()
2771 cfg->dst_info.data_width = dst_addr_width; in d40_set_runtime_config_write()
2773 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, in d40_set_runtime_config_write()
2778 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, in d40_set_runtime_config_write()
2785 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); in d40_set_runtime_config_write()
2787 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg); in d40_set_runtime_config_write()
2790 d40c->runtime_addr = config_addr; in d40_set_runtime_config_write()
2791 d40c->runtime_direction = direction; in d40_set_runtime_config_write()
2792 dev_dbg(d40c->base->dev, in d40_set_runtime_config_write()
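
d40_set_runtime_config_write() above picks the device-side parameters from whichever half of the dma_slave_config matches the direction: the src_* fields for DMA_DEV_TO_MEM, the dst_* fields for DMA_MEM_TO_DEV, with a missing address rejected. A condensed sketch of that selection, with the types trimmed to the fields used and none of the driver's width/burst validation:

#include <stdint.h>

enum dir { DEV_TO_MEM, MEM_TO_DEV };

struct slave_cfg {
	uint64_t src_addr, dst_addr;
	uint32_t src_addr_width, dst_addr_width;
	uint32_t src_maxburst, dst_maxburst;
};

/* returns the device address, or 0 to signal "no address supplied" */
static uint64_t pick_dev_side(const struct slave_cfg *c, enum dir d,
			      uint32_t *width, uint32_t *maxburst)
{
	if (d == DEV_TO_MEM) {
		*width = c->src_addr_width;
		*maxburst = c->src_maxburst;
		return c->src_addr;
	}
	*width = c->dst_addr_width;
	*maxburst = c->dst_maxburst;
	return c->dst_addr;
}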
2812 INIT_LIST_HEAD(&dma->channels); in d40_chan_init()
2816 d40c->base = base; in d40_chan_init()
2817 d40c->chan.device = dma; in d40_chan_init()
2819 spin_lock_init(&d40c->lock); in d40_chan_init()
2821 d40c->log_num = D40_PHY_CHAN; in d40_chan_init()
2823 INIT_LIST_HEAD(&d40c->done); in d40_chan_init()
2824 INIT_LIST_HEAD(&d40c->active); in d40_chan_init()
2825 INIT_LIST_HEAD(&d40c->queue); in d40_chan_init()
2826 INIT_LIST_HEAD(&d40c->pending_queue); in d40_chan_init()
2827 INIT_LIST_HEAD(&d40c->client); in d40_chan_init()
2828 INIT_LIST_HEAD(&d40c->prepare_queue); in d40_chan_init()
2830 tasklet_setup(&d40c->tasklet, dma_tasklet); in d40_chan_init()
2832 list_add_tail(&d40c->chan.device_node, in d40_chan_init()
2833 &dma->channels); in d40_chan_init()
2839 if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) { in d40_ops_init()
2840 dev->device_prep_slave_sg = d40_prep_slave_sg; in d40_ops_init()
2841 dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in d40_ops_init()
2844 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { in d40_ops_init()
2845 dev->device_prep_dma_memcpy = d40_prep_memcpy; in d40_ops_init()
2846 dev->directions = BIT(DMA_MEM_TO_MEM); in d40_ops_init()
2851 dev->copy_align = DMAENGINE_ALIGN_4_BYTES; in d40_ops_init()
2854 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) in d40_ops_init()
2855 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; in d40_ops_init()
2857 dev->device_alloc_chan_resources = d40_alloc_chan_resources; in d40_ops_init()
2858 dev->device_free_chan_resources = d40_free_chan_resources; in d40_ops_init()
2859 dev->device_issue_pending = d40_issue_pending; in d40_ops_init()
2860 dev->device_tx_status = d40_tx_status; in d40_ops_init()
2861 dev->device_config = d40_set_runtime_config; in d40_ops_init()
2862 dev->device_pause = d40_pause; in d40_ops_init()
2863 dev->device_resume = d40_resume; in d40_ops_init()
2864 dev->device_terminate_all = d40_terminate_all; in d40_ops_init()
2865 dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in d40_ops_init()
2866 dev->dev = base->dev; in d40_ops_init()
2874 d40_chan_init(base, &base->dma_slave, base->log_chans, in d40_dmaengine_init()
2875 0, base->num_log_chans); in d40_dmaengine_init()
2877 dma_cap_zero(base->dma_slave.cap_mask); in d40_dmaengine_init()
2878 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); in d40_dmaengine_init()
2879 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); in d40_dmaengine_init()
2881 d40_ops_init(base, &base->dma_slave); in d40_dmaengine_init()
2883 err = dmaenginem_async_device_register(&base->dma_slave); in d40_dmaengine_init()
2886 d40_err(base->dev, "Failed to register slave channels\n"); in d40_dmaengine_init()
2890 d40_chan_init(base, &base->dma_memcpy, base->log_chans, in d40_dmaengine_init()
2891 base->num_log_chans, base->num_memcpy_chans); in d40_dmaengine_init()
2893 dma_cap_zero(base->dma_memcpy.cap_mask); in d40_dmaengine_init()
2894 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); in d40_dmaengine_init()
2896 d40_ops_init(base, &base->dma_memcpy); in d40_dmaengine_init()
2898 err = dmaenginem_async_device_register(&base->dma_memcpy); in d40_dmaengine_init()
2901 d40_err(base->dev, in d40_dmaengine_init()
2902 "Failed to register memcpy only channels\n"); in d40_dmaengine_init()
2906 d40_chan_init(base, &base->dma_both, base->phy_chans, in d40_dmaengine_init()
2909 dma_cap_zero(base->dma_both.cap_mask); in d40_dmaengine_init()
2910 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); in d40_dmaengine_init()
2911 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); in d40_dmaengine_init()
2912 dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask); in d40_dmaengine_init()
2914 d40_ops_init(base, &base->dma_both); in d40_dmaengine_init()
2915 err = dmaenginem_async_device_register(&base->dma_both); in d40_dmaengine_init()
2918 d40_err(base->dev, in d40_dmaengine_init()
2919 "Failed to register logical and physical capable channels\n"); in d40_dmaengine_init()
2938 if (base->lcpa_regulator) in dma40_suspend()
2939 ret = regulator_disable(base->lcpa_regulator); in dma40_suspend()
2948 if (base->lcpa_regulator) { in dma40_resume()
2949 ret = regulator_enable(base->lcpa_regulator); in dma40_resume()
2979 for (i = 0; i < base->num_phy_chans; i++) { in d40_save_restore_registers()
2983 if (base->phy_res[i].reserved) in d40_save_restore_registers()
2986 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; in d40_save_restore_registers()
2989 dma40_backup(addr, &base->reg_val_backup_chan[idx], in d40_save_restore_registers()
2996 dma40_backup(base->virtbase, base->reg_val_backup, in d40_save_restore_registers()
3001 if (base->gen_dmac.backup) in d40_save_restore_registers()
3002 dma40_backup(base->virtbase, base->reg_val_backup_v4, in d40_save_restore_registers()
3003 base->gen_dmac.backup, in d40_save_restore_registers()
3004 base->gen_dmac.backup_size, in d40_save_restore_registers()
3015 if (base->rev != 1) in dma40_runtime_suspend()
3016 writel_relaxed(base->gcc_pwr_off_mask, in dma40_runtime_suspend()
3017 base->virtbase + D40_DREG_GCC); in dma40_runtime_suspend()
3029 base->virtbase + D40_DREG_GCC); in dma40_runtime_resume()
3048 int odd_even_bit = -2; in d40_phy_res_init()
3051 val[0] = readl(base->virtbase + D40_DREG_PRSME); in d40_phy_res_init()
3052 val[1] = readl(base->virtbase + D40_DREG_PRSMO); in d40_phy_res_init()
3054 for (i = 0; i < base->num_phy_chans; i++) { in d40_phy_res_init()
3055 base->phy_res[i].num = i; in d40_phy_res_init()
3058 /* Mark security-only channels as occupied */ in d40_phy_res_init()
3059 base->phy_res[i].allocated_src = D40_ALLOC_PHY; in d40_phy_res_init()
3060 base->phy_res[i].allocated_dst = D40_ALLOC_PHY; in d40_phy_res_init()
3061 base->phy_res[i].reserved = true; in d40_phy_res_init()
3069 base->phy_res[i].allocated_src = D40_ALLOC_FREE; in d40_phy_res_init()
3070 base->phy_res[i].allocated_dst = D40_ALLOC_FREE; in d40_phy_res_init()
3071 base->phy_res[i].reserved = false; in d40_phy_res_init()
3074 spin_lock_init(&base->phy_res[i].lock); in d40_phy_res_init()
3077 /* Mark disabled channels as occupied */ in d40_phy_res_init()
3078 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { in d40_phy_res_init()
3079 int chan = base->plat_data->disabled_channels[i]; in d40_phy_res_init()
3081 base->phy_res[chan].allocated_src = D40_ALLOC_PHY; in d40_phy_res_init()
3082 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; in d40_phy_res_init()
3083 base->phy_res[chan].reserved = true; in d40_phy_res_init()
3088 num_phy_chans_avail--; in d40_phy_res_init()
3091 /* Mark soft_lli channels */ in d40_phy_res_init()
3092 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) { in d40_phy_res_init()
3093 int chan = base->plat_data->soft_lli_chans[i]; in d40_phy_res_init()
3095 base->phy_res[chan].use_soft_lli = true; in d40_phy_res_init()
3098 dev_info(base->dev, "%d of %d physical DMA channels available\n", in d40_phy_res_init()
3099 num_phy_chans_avail, base->num_phy_chans); in d40_phy_res_init()
3102 val[0] = readl(base->virtbase + D40_DREG_PRTYP); in d40_phy_res_init()
3104 for (i = 0; i < base->num_phy_chans; i++) { in d40_phy_res_init()
3106 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && in d40_phy_res_init()
3108 dev_info(base->dev, in d40_phy_res_init()
3118 * The clocks for the event lines on which reserved channels exist in d40_phy_res_init()
3121 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); in d40_phy_res_init()
3122 base->gcc_pwr_off_mask = gcc; in d40_phy_res_init()
3138 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); in d40_hw_detect_init()
3139 struct device *dev = &pdev->dev; in d40_hw_detect_init()
3163 pid |= (readl(virtbase + SZ_4K - 0x20 + 4 * i) in d40_hw_detect_init()
3166 cid |= (readl(virtbase + SZ_4K - 0x10 + 4 * i) in d40_hw_detect_init()
3171 return -EINVAL; in d40_hw_detect_init()
3177 return -EINVAL; in d40_hw_detect_init()
3191 return -EINVAL; in d40_hw_detect_init()
3194 /* The number of physical channels on this HW */ in d40_hw_detect_init()
3195 if (plat_data->num_of_phy_chans) in d40_hw_detect_init()
3196 num_phy_chans = plat_data->num_of_phy_chans; in d40_hw_detect_init()
3200 /* The number of channels used for memcpy */ in d40_hw_detect_init()
3201 if (plat_data->num_of_memcpy_chans) in d40_hw_detect_init()
3202 num_memcpy_chans = plat_data->num_of_memcpy_chans; in d40_hw_detect_init()
3209 "hardware rev: %d with %d physical and %d logical channels\n", in d40_hw_detect_init()
3218 return -ENOMEM; in d40_hw_detect_init()
3220 base->rev = rev; in d40_hw_detect_init()
3221 base->clk = clk; in d40_hw_detect_init()
3222 base->num_memcpy_chans = num_memcpy_chans; in d40_hw_detect_init()
3223 base->num_phy_chans = num_phy_chans; in d40_hw_detect_init()
3224 base->num_log_chans = num_log_chans; in d40_hw_detect_init()
3225 base->virtbase = virtbase; in d40_hw_detect_init()
3226 base->plat_data = plat_data; in d40_hw_detect_init()
3227 base->dev = dev; in d40_hw_detect_init()
3228 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); in d40_hw_detect_init()
3229 base->log_chans = &base->phy_chans[num_phy_chans]; in d40_hw_detect_init()
3231 if (base->plat_data->num_of_phy_chans == 14) { in d40_hw_detect_init()
3232 base->gen_dmac.backup = d40_backup_regs_v4b; in d40_hw_detect_init()
3233 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B; in d40_hw_detect_init()
3234 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS; in d40_hw_detect_init()
3235 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR; in d40_hw_detect_init()
3236 base->gen_dmac.realtime_en = D40_DREG_CRSEG1; in d40_hw_detect_init()
3237 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1; in d40_hw_detect_init()
3238 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1; in d40_hw_detect_init()
3239 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1; in d40_hw_detect_init()
3240 base->gen_dmac.il = il_v4b; in d40_hw_detect_init()
3241 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b); in d40_hw_detect_init()
3242 base->gen_dmac.init_reg = dma_init_reg_v4b; in d40_hw_detect_init()
3243 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b); in d40_hw_detect_init()
3245 if (base->rev >= 3) { in d40_hw_detect_init()
3246 base->gen_dmac.backup = d40_backup_regs_v4a; in d40_hw_detect_init()
3247 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A; in d40_hw_detect_init()
3249 base->gen_dmac.interrupt_en = D40_DREG_PCMIS; in d40_hw_detect_init()
3250 base->gen_dmac.interrupt_clear = D40_DREG_PCICR; in d40_hw_detect_init()
3251 base->gen_dmac.realtime_en = D40_DREG_RSEG1; in d40_hw_detect_init()
3252 base->gen_dmac.realtime_clear = D40_DREG_RCEG1; in d40_hw_detect_init()
3253 base->gen_dmac.high_prio_en = D40_DREG_PSEG1; in d40_hw_detect_init()
3254 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1; in d40_hw_detect_init()
3255 base->gen_dmac.il = il_v4a; in d40_hw_detect_init()
3256 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a); in d40_hw_detect_init()
3257 base->gen_dmac.init_reg = dma_init_reg_v4a; in d40_hw_detect_init()
3258 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); in d40_hw_detect_init()
3261 base->phy_res = devm_kcalloc(dev, num_phy_chans, in d40_hw_detect_init()
3262 sizeof(*base->phy_res), in d40_hw_detect_init()
3264 if (!base->phy_res) in d40_hw_detect_init()
3265 return -ENOMEM; in d40_hw_detect_init()
3267 base->lookup_phy_chans = devm_kcalloc(dev, num_phy_chans, in d40_hw_detect_init()
3268 sizeof(*base->lookup_phy_chans), in d40_hw_detect_init()
3270 if (!base->lookup_phy_chans) in d40_hw_detect_init()
3271 return -ENOMEM; in d40_hw_detect_init()
3273 base->lookup_log_chans = devm_kcalloc(dev, num_log_chans, in d40_hw_detect_init()
3274 sizeof(*base->lookup_log_chans), in d40_hw_detect_init()
3276 if (!base->lookup_log_chans) in d40_hw_detect_init()
3277 return -ENOMEM; in d40_hw_detect_init()
3279 base->reg_val_backup_chan = devm_kmalloc_array(dev, base->num_phy_chans, in d40_hw_detect_init()
3282 if (!base->reg_val_backup_chan) in d40_hw_detect_init()
3283 return -ENOMEM; in d40_hw_detect_init()
3285 base->lcla_pool.alloc_map = devm_kcalloc(dev, num_phy_chans in d40_hw_detect_init()
3287 sizeof(*base->lcla_pool.alloc_map), in d40_hw_detect_init()
3289 if (!base->lcla_pool.alloc_map) in d40_hw_detect_init()
3290 return -ENOMEM; in d40_hw_detect_init()
3292 base->regs_interrupt = devm_kmalloc_array(dev, base->gen_dmac.il_size, in d40_hw_detect_init()
3293 sizeof(*base->regs_interrupt), in d40_hw_detect_init()
3295 if (!base->regs_interrupt) in d40_hw_detect_init()
3296 return -ENOMEM; in d40_hw_detect_init()
3298 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), in d40_hw_detect_init()
3301 if (!base->desc_slab) in d40_hw_detect_init()
3302 return -ENOMEM; in d40_hw_detect_init()
3305 base->desc_slab); in d40_hw_detect_init()
3322 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg; in d40_hw_init()
3323 u32 reg_size = base->gen_dmac.init_reg_size; in d40_hw_init()
3327 base->virtbase + dma_init_reg[i].reg); in d40_hw_init()
3329 /* Configure all our dma channels to default settings */ in d40_hw_init()
3330 for (i = 0; i < base->num_phy_chans; i++) { in d40_hw_init()
3334 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src in d40_hw_init()
3352 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); in d40_hw_init()
3353 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); in d40_hw_init()
3354 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); in d40_hw_init()
3355 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); in d40_hw_init()
3358 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en); in d40_hw_init()
3361 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear); in d40_hw_init()
3364 base->gen_dmac.init_reg = NULL; in d40_hw_init()
3365 base->gen_dmac.init_reg_size = 0; in d40_hw_init()
3370 struct d40_lcla_pool *pool = &base->lcla_pool; in d40_lcla_allocate()
3384 return -ENOMEM; in d40_lcla_allocate()
3387 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; in d40_lcla_allocate()
3391 base->lcla_pool.pages); in d40_lcla_allocate()
3394 d40_err(base->dev, "Failed to allocate %d pages.\n", in d40_lcla_allocate()
3395 base->lcla_pool.pages); in d40_lcla_allocate()
3396 ret = -ENOMEM; in d40_lcla_allocate()
3399 free_pages(page_list[j], base->lcla_pool.pages); in d40_lcla_allocate()
3404 (LCLA_ALIGNMENT - 1)) == 0) in d40_lcla_allocate()
3409 free_pages(page_list[j], base->lcla_pool.pages); in d40_lcla_allocate()
3412 base->lcla_pool.base = (void *)page_list[i]; in d40_lcla_allocate()
3418 dev_warn(base->dev, in d40_lcla_allocate()
3420 __func__, base->lcla_pool.pages); in d40_lcla_allocate()
3421 base->lcla_pool.base_unaligned = kmalloc(SZ_1K * in d40_lcla_allocate()
3422 base->num_phy_chans + in d40_lcla_allocate()
3425 if (!base->lcla_pool.base_unaligned) { in d40_lcla_allocate()
3426 ret = -ENOMEM; in d40_lcla_allocate()
3430 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, in d40_lcla_allocate()
3434 pool->dma_addr = dma_map_single(base->dev, pool->base, in d40_lcla_allocate()
3435 SZ_1K * base->num_phy_chans, in d40_lcla_allocate()
3437 if (dma_mapping_error(base->dev, pool->dma_addr)) { in d40_lcla_allocate()
3438 pool->dma_addr = 0; in d40_lcla_allocate()
3439 ret = -ENOMEM; in d40_lcla_allocate()
3443 writel(virt_to_phys(base->lcla_pool.base), in d40_lcla_allocate()
3444 base->virtbase + D40_DREG_LCLA); in d40_lcla_allocate()
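
d40_lcla_allocate() above keeps requesting page blocks until one happens to satisfy the LCLA alignment, holding on to the misaligned attempts so they can all be freed, and falls back to an over-sized kmalloc aligned with PTR_ALIGN (plus an explicit dma_map_single). A userspace sketch of the retry idea; the attempt count and alignment are hypothetical, and malloc will rarely satisfy such an alignment by chance:

#include <stdint.h>
#include <stdlib.h>

#define LCLA_ALIGN   0x40000u   /* hypothetical alignment requirement */
#define MAX_ATTEMPTS 256

/* try to obtain an allocation whose address is LCLA_ALIGN-aligned */
static void *alloc_aligned_by_retry(size_t size)
{
	void *tries[MAX_ATTEMPTS];
	void *hit = NULL;
	int i, n;

	for (n = 0; n < MAX_ATTEMPTS; n++) {
		tries[n] = malloc(size);
		if (!tries[n])
			break;
		if (((uintptr_t)tries[n] & (LCLA_ALIGN - 1)) == 0) {
			hit = tries[n];
			break;
		}
	}
	/* free every misaligned attempt; keep only the aligned one */
	for (i = 0; i < n; i++)
		free(tries[i]);
	return hit;   /* NULL: caller falls back to over-allocate + align */
}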
3460 return -ENOMEM; in d40_of_probe()
3463 of_property_read_u32(np, "dma-channels", &num_phy); in d40_of_probe()
3465 pdata->num_of_phy_chans = num_phy; in d40_of_probe()
3467 list = of_get_property(np, "memcpy-channels", &num_memcpy); in d40_of_probe()
3472 "Invalid number of memcpy channels specified (%d)\n", in d40_of_probe()
3474 return -EINVAL; in d40_of_probe()
3476 pdata->num_of_memcpy_chans = num_memcpy; in d40_of_probe()
3478 of_property_read_u32_array(np, "memcpy-channels", in d40_of_probe()
3482 list = of_get_property(np, "disabled-channels", &num_disabled); in d40_of_probe()
3487 "Invalid number of disabled channels specified (%d)\n", in d40_of_probe()
3489 return -EINVAL; in d40_of_probe()
3492 of_property_read_u32_array(np, "disabled-channels", in d40_of_probe()
3493 pdata->disabled_channels, in d40_of_probe()
3495 pdata->disabled_channels[num_disabled] = -1; in d40_of_probe()
3497 dev->platform_data = pdata; in d40_of_probe()
3504 struct device *dev = &pdev->dev; in d40_probe()
3505 struct device_node *np = pdev->dev.of_node; in d40_probe()
3515 ret = -ENOMEM; in d40_probe()
3527 spin_lock_init(&base->interrupt_lock); in d40_probe()
3528 spin_lock_init(&base->execmd_lock); in d40_probe()
3534 ret = -EINVAL; in d40_probe()
3543 base->lcpa_size = resource_size(&res_lcpa); in d40_probe()
3544 base->phy_lcpa = res_lcpa.start; in d40_probe()
3546 &base->phy_lcpa, &base->lcpa_size); in d40_probe()
3549 val = readl(base->virtbase + D40_DREG_LCPA); in d40_probe()
3550 if (base->phy_lcpa != val && val != 0) { in d40_probe()
3553 __func__, val, (u32)base->phy_lcpa); in d40_probe()
3555 writel(base->phy_lcpa, base->virtbase + D40_DREG_LCPA); in d40_probe()
3557 base->lcpa_base = devm_ioremap(dev, base->phy_lcpa, base->lcpa_size); in d40_probe()
3558 if (!base->lcpa_base) { in d40_probe()
3559 ret = -ENOMEM; in d40_probe()
3564 if (base->plat_data->use_esram_lcla) { in d40_probe()
3568 ret = -ENOENT; in d40_probe()
3573 base->lcla_pool.base = devm_ioremap(dev, res->start, in d40_probe()
3575 if (!base->lcla_pool.base) { in d40_probe()
3576 ret = -ENOMEM; in d40_probe()
3580 writel(res->start, base->virtbase + D40_DREG_LCLA); in d40_probe()
3590 spin_lock_init(&base->lcla_pool.lock); in d40_probe()
3592 base->irq = platform_get_irq(pdev, 0); in d40_probe()
3593 if (base->irq < 0) { in d40_probe()
3594 ret = base->irq; in d40_probe()
3598 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); in d40_probe()
3604 if (base->plat_data->use_esram_lcla) { in d40_probe()
3606 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); in d40_probe()
3607 if (IS_ERR(base->lcpa_regulator)) { in d40_probe()
3609 ret = PTR_ERR(base->lcpa_regulator); in d40_probe()
3610 base->lcpa_regulator = NULL; in d40_probe()
3614 ret = regulator_enable(base->lcpa_regulator); in d40_probe()
3618 regulator_put(base->lcpa_regulator); in d40_probe()
3619 base->lcpa_regulator = NULL; in d40_probe()
3624 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); in d40_probe()
3626 pm_runtime_irq_safe(base->dev); in d40_probe()
3627 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); in d40_probe()
3628 pm_runtime_use_autosuspend(base->dev); in d40_probe()
3629 pm_runtime_mark_last_busy(base->dev); in d40_probe()
3630 pm_runtime_set_active(base->dev); in d40_probe()
3631 pm_runtime_enable(base->dev); in d40_probe()
3637 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); in d40_probe()
3652 dev_info(base->dev, "initialized\n"); in d40_probe()
3656 if (base->lcla_pool.dma_addr) in d40_probe()
3657 dma_unmap_single(base->dev, base->lcla_pool.dma_addr, in d40_probe()
3658 SZ_1K * base->num_phy_chans, in d40_probe()
3661 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) in d40_probe()
3662 free_pages((unsigned long)base->lcla_pool.base, in d40_probe()
3663 base->lcla_pool.pages); in d40_probe()
3665 kfree(base->lcla_pool.base_unaligned); in d40_probe()
3667 if (base->lcpa_regulator) { in d40_probe()
3668 regulator_disable(base->lcpa_regulator); in d40_probe()
3669 regulator_put(base->lcpa_regulator); in d40_probe()
3671 pm_runtime_disable(base->dev); in d40_probe()