1dc78baa2SNicolas Ferre /* 2dc78baa2SNicolas Ferre * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) 3dc78baa2SNicolas Ferre * 4dc78baa2SNicolas Ferre * Copyright (C) 2008 Atmel Corporation 5dc78baa2SNicolas Ferre * 6dc78baa2SNicolas Ferre * This program is free software; you can redistribute it and/or modify 7dc78baa2SNicolas Ferre * it under the terms of the GNU General Public License as published by 8dc78baa2SNicolas Ferre * the Free Software Foundation; either version 2 of the License, or 9dc78baa2SNicolas Ferre * (at your option) any later version. 10dc78baa2SNicolas Ferre * 11dc78baa2SNicolas Ferre * 129102d871SNicolas Ferre * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. 139102d871SNicolas Ferre * The only Atmel DMA Controller that is not covered by this driver is the one 149102d871SNicolas Ferre * found on AT91SAM9263. 15dc78baa2SNicolas Ferre */ 16dc78baa2SNicolas Ferre 17dc78baa2SNicolas Ferre #include <linux/clk.h> 18dc78baa2SNicolas Ferre #include <linux/dmaengine.h> 19dc78baa2SNicolas Ferre #include <linux/dma-mapping.h> 20dc78baa2SNicolas Ferre #include <linux/dmapool.h> 21dc78baa2SNicolas Ferre #include <linux/interrupt.h> 22dc78baa2SNicolas Ferre #include <linux/module.h> 23dc78baa2SNicolas Ferre #include <linux/platform_device.h> 245a0e3ad6STejun Heo #include <linux/slab.h> 25c5115953SNicolas Ferre #include <linux/of.h> 26c5115953SNicolas Ferre #include <linux/of_device.h> 27bbe89c8eSLudovic Desroches #include <linux/of_dma.h> 28dc78baa2SNicolas Ferre 29dc78baa2SNicolas Ferre #include "at_hdmac_regs.h" 30d2ebfb33SRussell King - ARM Linux #include "dmaengine.h" 31dc78baa2SNicolas Ferre 32dc78baa2SNicolas Ferre /* 33dc78baa2SNicolas Ferre * Glossary 34dc78baa2SNicolas Ferre * -------- 35dc78baa2SNicolas Ferre * 36dc78baa2SNicolas Ferre * at_hdmac : Name of the ATmel AHB DMA Controller 37dc78baa2SNicolas Ferre * at_dma_ / atdma : ATmel DMA controller entity related 38dc78baa2SNicolas Ferre * atc_ / 
atchan : ATmel DMA Channel entity related 39dc78baa2SNicolas Ferre */ 40dc78baa2SNicolas Ferre 41dc78baa2SNicolas Ferre #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) 42ae14d4b5SNicolas Ferre #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ 43ae14d4b5SNicolas Ferre |ATC_DIF(AT_DMA_MEM_IF)) 44dc78baa2SNicolas Ferre 45dc78baa2SNicolas Ferre /* 46dc78baa2SNicolas Ferre * Initial number of descriptors to allocate for each channel. This could 47dc78baa2SNicolas Ferre * be increased during dma usage. 48dc78baa2SNicolas Ferre */ 49dc78baa2SNicolas Ferre static unsigned int init_nr_desc_per_channel = 64; 50dc78baa2SNicolas Ferre module_param(init_nr_desc_per_channel, uint, 0644); 51dc78baa2SNicolas Ferre MODULE_PARM_DESC(init_nr_desc_per_channel, 52dc78baa2SNicolas Ferre "initial descriptors per channel (default: 64)"); 53dc78baa2SNicolas Ferre 54dc78baa2SNicolas Ferre 55dc78baa2SNicolas Ferre /* prototypes */ 56dc78baa2SNicolas Ferre static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); 57dc78baa2SNicolas Ferre 58dc78baa2SNicolas Ferre 59dc78baa2SNicolas Ferre /*----------------------------------------------------------------------*/ 60dc78baa2SNicolas Ferre 61dc78baa2SNicolas Ferre static struct at_desc *atc_first_active(struct at_dma_chan *atchan) 62dc78baa2SNicolas Ferre { 63dc78baa2SNicolas Ferre return list_first_entry(&atchan->active_list, 64dc78baa2SNicolas Ferre struct at_desc, desc_node); 65dc78baa2SNicolas Ferre } 66dc78baa2SNicolas Ferre 67dc78baa2SNicolas Ferre static struct at_desc *atc_first_queued(struct at_dma_chan *atchan) 68dc78baa2SNicolas Ferre { 69dc78baa2SNicolas Ferre return list_first_entry(&atchan->queue, 70dc78baa2SNicolas Ferre struct at_desc, desc_node); 71dc78baa2SNicolas Ferre } 72dc78baa2SNicolas Ferre 73dc78baa2SNicolas Ferre /** 74421f91d2SUwe Kleine-König * atc_alloc_descriptor - allocate and return an initialized descriptor 75dc78baa2SNicolas Ferre * @chan: the channel to allocate descriptors for 76dc78baa2SNicolas 
Ferre * @gfp_flags: GFP allocation flags 77dc78baa2SNicolas Ferre * 78dc78baa2SNicolas Ferre * Note: The ack-bit is positioned in the descriptor flag at creation time 79dc78baa2SNicolas Ferre * to make initial allocation more convenient. This bit will be cleared 80dc78baa2SNicolas Ferre * and control will be given to client at usage time (during 81dc78baa2SNicolas Ferre * preparation functions). 82dc78baa2SNicolas Ferre */ 83dc78baa2SNicolas Ferre static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, 84dc78baa2SNicolas Ferre gfp_t gfp_flags) 85dc78baa2SNicolas Ferre { 86dc78baa2SNicolas Ferre struct at_desc *desc = NULL; 87dc78baa2SNicolas Ferre struct at_dma *atdma = to_at_dma(chan->device); 88dc78baa2SNicolas Ferre dma_addr_t phys; 89dc78baa2SNicolas Ferre 90dc78baa2SNicolas Ferre desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); 91dc78baa2SNicolas Ferre if (desc) { 92dc78baa2SNicolas Ferre memset(desc, 0, sizeof(struct at_desc)); 93285a3c71SDan Williams INIT_LIST_HEAD(&desc->tx_list); 94dc78baa2SNicolas Ferre dma_async_tx_descriptor_init(&desc->txd, chan); 95dc78baa2SNicolas Ferre /* txd.flags will be overwritten in prep functions */ 96dc78baa2SNicolas Ferre desc->txd.flags = DMA_CTRL_ACK; 97dc78baa2SNicolas Ferre desc->txd.tx_submit = atc_tx_submit; 98dc78baa2SNicolas Ferre desc->txd.phys = phys; 99dc78baa2SNicolas Ferre } 100dc78baa2SNicolas Ferre 101dc78baa2SNicolas Ferre return desc; 102dc78baa2SNicolas Ferre } 103dc78baa2SNicolas Ferre 104dc78baa2SNicolas Ferre /** 105af901ca1SAndré Goddard Rosa * atc_desc_get - get an unused descriptor from free_list 106dc78baa2SNicolas Ferre * @atchan: channel we want a new descriptor for 107dc78baa2SNicolas Ferre */ 108dc78baa2SNicolas Ferre static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) 109dc78baa2SNicolas Ferre { 110dc78baa2SNicolas Ferre struct at_desc *desc, *_desc; 111dc78baa2SNicolas Ferre struct at_desc *ret = NULL; 112d8cb04b0SNicolas Ferre unsigned long flags; 
113dc78baa2SNicolas Ferre unsigned int i = 0; 114dc78baa2SNicolas Ferre LIST_HEAD(tmp_list); 115dc78baa2SNicolas Ferre 116d8cb04b0SNicolas Ferre spin_lock_irqsave(&atchan->lock, flags); 117dc78baa2SNicolas Ferre list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 118dc78baa2SNicolas Ferre i++; 119dc78baa2SNicolas Ferre if (async_tx_test_ack(&desc->txd)) { 120dc78baa2SNicolas Ferre list_del(&desc->desc_node); 121dc78baa2SNicolas Ferre ret = desc; 122dc78baa2SNicolas Ferre break; 123dc78baa2SNicolas Ferre } 124dc78baa2SNicolas Ferre dev_dbg(chan2dev(&atchan->chan_common), 125dc78baa2SNicolas Ferre "desc %p not ACKed\n", desc); 126dc78baa2SNicolas Ferre } 127d8cb04b0SNicolas Ferre spin_unlock_irqrestore(&atchan->lock, flags); 128dc78baa2SNicolas Ferre dev_vdbg(chan2dev(&atchan->chan_common), 129dc78baa2SNicolas Ferre "scanned %u descriptors on freelist\n", i); 130dc78baa2SNicolas Ferre 131dc78baa2SNicolas Ferre /* no more descriptor available in initial pool: create one more */ 132dc78baa2SNicolas Ferre if (!ret) { 133dc78baa2SNicolas Ferre ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); 134dc78baa2SNicolas Ferre if (ret) { 135d8cb04b0SNicolas Ferre spin_lock_irqsave(&atchan->lock, flags); 136dc78baa2SNicolas Ferre atchan->descs_allocated++; 137d8cb04b0SNicolas Ferre spin_unlock_irqrestore(&atchan->lock, flags); 138dc78baa2SNicolas Ferre } else { 139dc78baa2SNicolas Ferre dev_err(chan2dev(&atchan->chan_common), 140dc78baa2SNicolas Ferre "not enough descriptors available\n"); 141dc78baa2SNicolas Ferre } 142dc78baa2SNicolas Ferre } 143dc78baa2SNicolas Ferre 144dc78baa2SNicolas Ferre return ret; 145dc78baa2SNicolas Ferre } 146dc78baa2SNicolas Ferre 147dc78baa2SNicolas Ferre /** 148dc78baa2SNicolas Ferre * atc_desc_put - move a descriptor, including any children, to the free list 149dc78baa2SNicolas Ferre * @atchan: channel we work on 150dc78baa2SNicolas Ferre * @desc: descriptor, at the head of a chain, to move to free list 
151dc78baa2SNicolas Ferre */ 152dc78baa2SNicolas Ferre static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) 153dc78baa2SNicolas Ferre { 154dc78baa2SNicolas Ferre if (desc) { 155dc78baa2SNicolas Ferre struct at_desc *child; 156d8cb04b0SNicolas Ferre unsigned long flags; 157dc78baa2SNicolas Ferre 158d8cb04b0SNicolas Ferre spin_lock_irqsave(&atchan->lock, flags); 159285a3c71SDan Williams list_for_each_entry(child, &desc->tx_list, desc_node) 160dc78baa2SNicolas Ferre dev_vdbg(chan2dev(&atchan->chan_common), 161dc78baa2SNicolas Ferre "moving child desc %p to freelist\n", 162dc78baa2SNicolas Ferre child); 163285a3c71SDan Williams list_splice_init(&desc->tx_list, &atchan->free_list); 164dc78baa2SNicolas Ferre dev_vdbg(chan2dev(&atchan->chan_common), 165dc78baa2SNicolas Ferre "moving desc %p to freelist\n", desc); 166dc78baa2SNicolas Ferre list_add(&desc->desc_node, &atchan->free_list); 167d8cb04b0SNicolas Ferre spin_unlock_irqrestore(&atchan->lock, flags); 168dc78baa2SNicolas Ferre } 169dc78baa2SNicolas Ferre } 170dc78baa2SNicolas Ferre 171dc78baa2SNicolas Ferre /** 172d73111c6SMasanari Iida * atc_desc_chain - build chain adding a descriptor 173d73111c6SMasanari Iida * @first: address of first descriptor of the chain 174d73111c6SMasanari Iida * @prev: address of previous descriptor of the chain 17553830cc7SNicolas Ferre * @desc: descriptor to queue 17653830cc7SNicolas Ferre * 17753830cc7SNicolas Ferre * Called from prep_* functions 17853830cc7SNicolas Ferre */ 17953830cc7SNicolas Ferre static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, 18053830cc7SNicolas Ferre struct at_desc *desc) 18153830cc7SNicolas Ferre { 18253830cc7SNicolas Ferre if (!(*first)) { 18353830cc7SNicolas Ferre *first = desc; 18453830cc7SNicolas Ferre } else { 18553830cc7SNicolas Ferre /* inform the HW lli about chaining */ 18653830cc7SNicolas Ferre (*prev)->lli.dscr = desc->txd.phys; 18753830cc7SNicolas Ferre /* insert the link descriptor to the LD ring */ 
18853830cc7SNicolas Ferre list_add_tail(&desc->desc_node, 18953830cc7SNicolas Ferre &(*first)->tx_list); 19053830cc7SNicolas Ferre } 19153830cc7SNicolas Ferre *prev = desc; 19253830cc7SNicolas Ferre } 19353830cc7SNicolas Ferre 19453830cc7SNicolas Ferre /** 195dc78baa2SNicolas Ferre * atc_dostart - starts the DMA engine for real 196dc78baa2SNicolas Ferre * @atchan: the channel we want to start 197dc78baa2SNicolas Ferre * @first: first descriptor in the list we want to begin with 198dc78baa2SNicolas Ferre * 199dc78baa2SNicolas Ferre * Called with atchan->lock held and bh disabled 200dc78baa2SNicolas Ferre */ 201dc78baa2SNicolas Ferre static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) 202dc78baa2SNicolas Ferre { 203dc78baa2SNicolas Ferre struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 204dc78baa2SNicolas Ferre 205dc78baa2SNicolas Ferre /* ASSERT: channel is idle */ 206dc78baa2SNicolas Ferre if (atc_chan_is_enabled(atchan)) { 207dc78baa2SNicolas Ferre dev_err(chan2dev(&atchan->chan_common), 208dc78baa2SNicolas Ferre "BUG: Attempted to start non-idle channel\n"); 209dc78baa2SNicolas Ferre dev_err(chan2dev(&atchan->chan_common), 210dc78baa2SNicolas Ferre " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", 211dc78baa2SNicolas Ferre channel_readl(atchan, SADDR), 212dc78baa2SNicolas Ferre channel_readl(atchan, DADDR), 213dc78baa2SNicolas Ferre channel_readl(atchan, CTRLA), 214dc78baa2SNicolas Ferre channel_readl(atchan, CTRLB), 215dc78baa2SNicolas Ferre channel_readl(atchan, DSCR)); 216dc78baa2SNicolas Ferre 217dc78baa2SNicolas Ferre /* The tasklet will hopefully advance the queue... 
*/ 218dc78baa2SNicolas Ferre return; 219dc78baa2SNicolas Ferre } 220dc78baa2SNicolas Ferre 221dc78baa2SNicolas Ferre vdbg_dump_regs(atchan); 222dc78baa2SNicolas Ferre 223dc78baa2SNicolas Ferre channel_writel(atchan, SADDR, 0); 224dc78baa2SNicolas Ferre channel_writel(atchan, DADDR, 0); 225dc78baa2SNicolas Ferre channel_writel(atchan, CTRLA, 0); 226dc78baa2SNicolas Ferre channel_writel(atchan, CTRLB, 0); 227dc78baa2SNicolas Ferre channel_writel(atchan, DSCR, first->txd.phys); 228dc78baa2SNicolas Ferre dma_writel(atdma, CHER, atchan->mask); 229dc78baa2SNicolas Ferre 230dc78baa2SNicolas Ferre vdbg_dump_regs(atchan); 231dc78baa2SNicolas Ferre } 232dc78baa2SNicolas Ferre 233dc78baa2SNicolas Ferre /** 234dc78baa2SNicolas Ferre * atc_chain_complete - finish work for one transaction chain 235dc78baa2SNicolas Ferre * @atchan: channel we work on 236dc78baa2SNicolas Ferre * @desc: descriptor at the head of the chain we want do complete 237dc78baa2SNicolas Ferre * 238dc78baa2SNicolas Ferre * Called with atchan->lock held and bh disabled */ 239dc78baa2SNicolas Ferre static void 240dc78baa2SNicolas Ferre atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) 241dc78baa2SNicolas Ferre { 242dc78baa2SNicolas Ferre struct dma_async_tx_descriptor *txd = &desc->txd; 243dc78baa2SNicolas Ferre 244dc78baa2SNicolas Ferre dev_vdbg(chan2dev(&atchan->chan_common), 245dc78baa2SNicolas Ferre "descriptor %u complete\n", txd->cookie); 246dc78baa2SNicolas Ferre 247d4116052SVinod Koul /* mark the descriptor as complete for non cyclic cases only */ 248d4116052SVinod Koul if (!atc_chan_is_cyclic(atchan)) 249f7fbce07SRussell King - ARM Linux dma_cookie_complete(txd); 250dc78baa2SNicolas Ferre 251dc78baa2SNicolas Ferre /* move children to free_list */ 252285a3c71SDan Williams list_splice_init(&desc->tx_list, &atchan->free_list); 253dc78baa2SNicolas Ferre /* move myself to free_list */ 254dc78baa2SNicolas Ferre list_move(&desc->desc_node, &atchan->free_list); 255dc78baa2SNicolas Ferre 
256ebcf9b80SNicolas Ferre /* unmap dma addresses (not on slave channels) */ 257657a77faSAtsushi Nemoto if (!atchan->chan_common.private) { 258657a77faSAtsushi Nemoto struct device *parent = chan2parent(&atchan->chan_common); 259dc78baa2SNicolas Ferre if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 260dc78baa2SNicolas Ferre if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) 261657a77faSAtsushi Nemoto dma_unmap_single(parent, 262dc78baa2SNicolas Ferre desc->lli.daddr, 263dc78baa2SNicolas Ferre desc->len, DMA_FROM_DEVICE); 264dc78baa2SNicolas Ferre else 265657a77faSAtsushi Nemoto dma_unmap_page(parent, 266dc78baa2SNicolas Ferre desc->lli.daddr, 267dc78baa2SNicolas Ferre desc->len, DMA_FROM_DEVICE); 268dc78baa2SNicolas Ferre } 269dc78baa2SNicolas Ferre if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 270dc78baa2SNicolas Ferre if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) 271657a77faSAtsushi Nemoto dma_unmap_single(parent, 272dc78baa2SNicolas Ferre desc->lli.saddr, 273dc78baa2SNicolas Ferre desc->len, DMA_TO_DEVICE); 274dc78baa2SNicolas Ferre else 275657a77faSAtsushi Nemoto dma_unmap_page(parent, 276dc78baa2SNicolas Ferre desc->lli.saddr, 277dc78baa2SNicolas Ferre desc->len, DMA_TO_DEVICE); 278dc78baa2SNicolas Ferre } 279657a77faSAtsushi Nemoto } 280dc78baa2SNicolas Ferre 28153830cc7SNicolas Ferre /* for cyclic transfers, 28253830cc7SNicolas Ferre * no need to replay callback function while stopping */ 2833c477482SNicolas Ferre if (!atc_chan_is_cyclic(atchan)) { 28453830cc7SNicolas Ferre dma_async_tx_callback callback = txd->callback; 28553830cc7SNicolas Ferre void *param = txd->callback_param; 28653830cc7SNicolas Ferre 287dc78baa2SNicolas Ferre /* 288dc78baa2SNicolas Ferre * The API requires that no submissions are done from a 289dc78baa2SNicolas Ferre * callback, so we don't need to drop the lock here 290dc78baa2SNicolas Ferre */ 291dc78baa2SNicolas Ferre if (callback) 292dc78baa2SNicolas Ferre callback(param); 29353830cc7SNicolas Ferre } 294dc78baa2SNicolas Ferre 
295dc78baa2SNicolas Ferre dma_run_dependencies(txd); 296dc78baa2SNicolas Ferre } 297dc78baa2SNicolas Ferre 298dc78baa2SNicolas Ferre /** 299dc78baa2SNicolas Ferre * atc_complete_all - finish work for all transactions 300dc78baa2SNicolas Ferre * @atchan: channel to complete transactions for 301dc78baa2SNicolas Ferre * 302dc78baa2SNicolas Ferre * Eventually submit queued descriptors if any 303dc78baa2SNicolas Ferre * 304dc78baa2SNicolas Ferre * Assume channel is idle while calling this function 305dc78baa2SNicolas Ferre * Called with atchan->lock held and bh disabled 306dc78baa2SNicolas Ferre */ 307dc78baa2SNicolas Ferre static void atc_complete_all(struct at_dma_chan *atchan) 308dc78baa2SNicolas Ferre { 309dc78baa2SNicolas Ferre struct at_desc *desc, *_desc; 310dc78baa2SNicolas Ferre LIST_HEAD(list); 311dc78baa2SNicolas Ferre 312dc78baa2SNicolas Ferre dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); 313dc78baa2SNicolas Ferre 314dc78baa2SNicolas Ferre /* 315dc78baa2SNicolas Ferre * Submit queued descriptors ASAP, i.e. before we go through 316dc78baa2SNicolas Ferre * the completed ones. 
317dc78baa2SNicolas Ferre */ 318dc78baa2SNicolas Ferre if (!list_empty(&atchan->queue)) 319dc78baa2SNicolas Ferre atc_dostart(atchan, atc_first_queued(atchan)); 320dc78baa2SNicolas Ferre /* empty active_list now it is completed */ 321dc78baa2SNicolas Ferre list_splice_init(&atchan->active_list, &list); 322dc78baa2SNicolas Ferre /* empty queue list by moving descriptors (if any) to active_list */ 323dc78baa2SNicolas Ferre list_splice_init(&atchan->queue, &atchan->active_list); 324dc78baa2SNicolas Ferre 325dc78baa2SNicolas Ferre list_for_each_entry_safe(desc, _desc, &list, desc_node) 326dc78baa2SNicolas Ferre atc_chain_complete(atchan, desc); 327dc78baa2SNicolas Ferre } 328dc78baa2SNicolas Ferre 329dc78baa2SNicolas Ferre /** 330dc78baa2SNicolas Ferre * atc_cleanup_descriptors - cleanup up finished descriptors in active_list 331dc78baa2SNicolas Ferre * @atchan: channel to be cleaned up 332dc78baa2SNicolas Ferre * 333dc78baa2SNicolas Ferre * Called with atchan->lock held and bh disabled 334dc78baa2SNicolas Ferre */ 335dc78baa2SNicolas Ferre static void atc_cleanup_descriptors(struct at_dma_chan *atchan) 336dc78baa2SNicolas Ferre { 337dc78baa2SNicolas Ferre struct at_desc *desc, *_desc; 338dc78baa2SNicolas Ferre struct at_desc *child; 339dc78baa2SNicolas Ferre 340dc78baa2SNicolas Ferre dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n"); 341dc78baa2SNicolas Ferre 342dc78baa2SNicolas Ferre list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { 343dc78baa2SNicolas Ferre if (!(desc->lli.ctrla & ATC_DONE)) 344dc78baa2SNicolas Ferre /* This one is currently in progress */ 345dc78baa2SNicolas Ferre return; 346dc78baa2SNicolas Ferre 347285a3c71SDan Williams list_for_each_entry(child, &desc->tx_list, desc_node) 348dc78baa2SNicolas Ferre if (!(child->lli.ctrla & ATC_DONE)) 349dc78baa2SNicolas Ferre /* Currently in progress */ 350dc78baa2SNicolas Ferre return; 351dc78baa2SNicolas Ferre 352dc78baa2SNicolas Ferre /* 353dc78baa2SNicolas Ferre * No 
descriptors so far seem to be in progress, i.e. 354dc78baa2SNicolas Ferre * this chain must be done. 355dc78baa2SNicolas Ferre */ 356dc78baa2SNicolas Ferre atc_chain_complete(atchan, desc); 357dc78baa2SNicolas Ferre } 358dc78baa2SNicolas Ferre } 359dc78baa2SNicolas Ferre 360dc78baa2SNicolas Ferre /** 361dc78baa2SNicolas Ferre * atc_advance_work - at the end of a transaction, move forward 362dc78baa2SNicolas Ferre * @atchan: channel where the transaction ended 363dc78baa2SNicolas Ferre * 364dc78baa2SNicolas Ferre * Called with atchan->lock held and bh disabled 365dc78baa2SNicolas Ferre */ 366dc78baa2SNicolas Ferre static void atc_advance_work(struct at_dma_chan *atchan) 367dc78baa2SNicolas Ferre { 368dc78baa2SNicolas Ferre dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); 369dc78baa2SNicolas Ferre 370d202f051SLudovic Desroches if (atc_chan_is_enabled(atchan)) 371d202f051SLudovic Desroches return; 372d202f051SLudovic Desroches 373dc78baa2SNicolas Ferre if (list_empty(&atchan->active_list) || 374dc78baa2SNicolas Ferre list_is_singular(&atchan->active_list)) { 375dc78baa2SNicolas Ferre atc_complete_all(atchan); 376dc78baa2SNicolas Ferre } else { 377dc78baa2SNicolas Ferre atc_chain_complete(atchan, atc_first_active(atchan)); 378dc78baa2SNicolas Ferre /* advance work */ 379dc78baa2SNicolas Ferre atc_dostart(atchan, atc_first_active(atchan)); 380dc78baa2SNicolas Ferre } 381dc78baa2SNicolas Ferre } 382dc78baa2SNicolas Ferre 383dc78baa2SNicolas Ferre 384dc78baa2SNicolas Ferre /** 385dc78baa2SNicolas Ferre * atc_handle_error - handle errors reported by DMA controller 386dc78baa2SNicolas Ferre * @atchan: channel where error occurs 387dc78baa2SNicolas Ferre * 388dc78baa2SNicolas Ferre * Called with atchan->lock held and bh disabled 389dc78baa2SNicolas Ferre */ 390dc78baa2SNicolas Ferre static void atc_handle_error(struct at_dma_chan *atchan) 391dc78baa2SNicolas Ferre { 392dc78baa2SNicolas Ferre struct at_desc *bad_desc; 393dc78baa2SNicolas Ferre struct at_desc 
*child; 394dc78baa2SNicolas Ferre 395dc78baa2SNicolas Ferre /* 396dc78baa2SNicolas Ferre * The descriptor currently at the head of the active list is 397dc78baa2SNicolas Ferre * broked. Since we don't have any way to report errors, we'll 398dc78baa2SNicolas Ferre * just have to scream loudly and try to carry on. 399dc78baa2SNicolas Ferre */ 400dc78baa2SNicolas Ferre bad_desc = atc_first_active(atchan); 401dc78baa2SNicolas Ferre list_del_init(&bad_desc->desc_node); 402dc78baa2SNicolas Ferre 403dc78baa2SNicolas Ferre /* As we are stopped, take advantage to push queued descriptors 404dc78baa2SNicolas Ferre * in active_list */ 405dc78baa2SNicolas Ferre list_splice_init(&atchan->queue, atchan->active_list.prev); 406dc78baa2SNicolas Ferre 407dc78baa2SNicolas Ferre /* Try to restart the controller */ 408dc78baa2SNicolas Ferre if (!list_empty(&atchan->active_list)) 409dc78baa2SNicolas Ferre atc_dostart(atchan, atc_first_active(atchan)); 410dc78baa2SNicolas Ferre 411dc78baa2SNicolas Ferre /* 412dc78baa2SNicolas Ferre * KERN_CRITICAL may seem harsh, but since this only happens 413dc78baa2SNicolas Ferre * when someone submits a bad physical address in a 414dc78baa2SNicolas Ferre * descriptor, we should consider ourselves lucky that the 415dc78baa2SNicolas Ferre * controller flagged an error instead of scribbling over 416dc78baa2SNicolas Ferre * random memory locations. 
417dc78baa2SNicolas Ferre */ 418dc78baa2SNicolas Ferre dev_crit(chan2dev(&atchan->chan_common), 419dc78baa2SNicolas Ferre "Bad descriptor submitted for DMA!\n"); 420dc78baa2SNicolas Ferre dev_crit(chan2dev(&atchan->chan_common), 421dc78baa2SNicolas Ferre " cookie: %d\n", bad_desc->txd.cookie); 422dc78baa2SNicolas Ferre atc_dump_lli(atchan, &bad_desc->lli); 423285a3c71SDan Williams list_for_each_entry(child, &bad_desc->tx_list, desc_node) 424dc78baa2SNicolas Ferre atc_dump_lli(atchan, &child->lli); 425dc78baa2SNicolas Ferre 426dc78baa2SNicolas Ferre /* Pretend the descriptor completed successfully */ 427dc78baa2SNicolas Ferre atc_chain_complete(atchan, bad_desc); 428dc78baa2SNicolas Ferre } 429dc78baa2SNicolas Ferre 43053830cc7SNicolas Ferre /** 43153830cc7SNicolas Ferre * atc_handle_cyclic - at the end of a period, run callback function 43253830cc7SNicolas Ferre * @atchan: channel used for cyclic operations 43353830cc7SNicolas Ferre * 43453830cc7SNicolas Ferre * Called with atchan->lock held and bh disabled 43553830cc7SNicolas Ferre */ 43653830cc7SNicolas Ferre static void atc_handle_cyclic(struct at_dma_chan *atchan) 43753830cc7SNicolas Ferre { 43853830cc7SNicolas Ferre struct at_desc *first = atc_first_active(atchan); 43953830cc7SNicolas Ferre struct dma_async_tx_descriptor *txd = &first->txd; 44053830cc7SNicolas Ferre dma_async_tx_callback callback = txd->callback; 44153830cc7SNicolas Ferre void *param = txd->callback_param; 44253830cc7SNicolas Ferre 44353830cc7SNicolas Ferre dev_vdbg(chan2dev(&atchan->chan_common), 44453830cc7SNicolas Ferre "new cyclic period llp 0x%08x\n", 44553830cc7SNicolas Ferre channel_readl(atchan, DSCR)); 44653830cc7SNicolas Ferre 44753830cc7SNicolas Ferre if (callback) 44853830cc7SNicolas Ferre callback(param); 44953830cc7SNicolas Ferre } 450dc78baa2SNicolas Ferre 451dc78baa2SNicolas Ferre /*-- IRQ & Tasklet ---------------------------------------------------*/ 452dc78baa2SNicolas Ferre 453dc78baa2SNicolas Ferre static void 
atc_tasklet(unsigned long data) 454dc78baa2SNicolas Ferre { 455dc78baa2SNicolas Ferre struct at_dma_chan *atchan = (struct at_dma_chan *)data; 456d8cb04b0SNicolas Ferre unsigned long flags; 457dc78baa2SNicolas Ferre 458d8cb04b0SNicolas Ferre spin_lock_irqsave(&atchan->lock, flags); 45953830cc7SNicolas Ferre if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) 460dc78baa2SNicolas Ferre atc_handle_error(atchan); 4613c477482SNicolas Ferre else if (atc_chan_is_cyclic(atchan)) 46253830cc7SNicolas Ferre atc_handle_cyclic(atchan); 463dc78baa2SNicolas Ferre else 464dc78baa2SNicolas Ferre atc_advance_work(atchan); 465dc78baa2SNicolas Ferre 466d8cb04b0SNicolas Ferre spin_unlock_irqrestore(&atchan->lock, flags); 467dc78baa2SNicolas Ferre } 468dc78baa2SNicolas Ferre 469dc78baa2SNicolas Ferre static irqreturn_t at_dma_interrupt(int irq, void *dev_id) 470dc78baa2SNicolas Ferre { 471dc78baa2SNicolas Ferre struct at_dma *atdma = (struct at_dma *)dev_id; 472dc78baa2SNicolas Ferre struct at_dma_chan *atchan; 473dc78baa2SNicolas Ferre int i; 474dc78baa2SNicolas Ferre u32 status, pending, imr; 475dc78baa2SNicolas Ferre int ret = IRQ_NONE; 476dc78baa2SNicolas Ferre 477dc78baa2SNicolas Ferre do { 478dc78baa2SNicolas Ferre imr = dma_readl(atdma, EBCIMR); 479dc78baa2SNicolas Ferre status = dma_readl(atdma, EBCISR); 480dc78baa2SNicolas Ferre pending = status & imr; 481dc78baa2SNicolas Ferre 482dc78baa2SNicolas Ferre if (!pending) 483dc78baa2SNicolas Ferre break; 484dc78baa2SNicolas Ferre 485dc78baa2SNicolas Ferre dev_vdbg(atdma->dma_common.dev, 486dc78baa2SNicolas Ferre "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n", 487dc78baa2SNicolas Ferre status, imr, pending); 488dc78baa2SNicolas Ferre 489dc78baa2SNicolas Ferre for (i = 0; i < atdma->dma_common.chancnt; i++) { 490dc78baa2SNicolas Ferre atchan = &atdma->chan[i]; 4919b3aa589SNicolas Ferre if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { 492dc78baa2SNicolas Ferre if (pending & AT_DMA_ERR(i)) { 493dc78baa2SNicolas Ferre /* Disable 
channel on AHB error */ 49423b5e3adSNicolas Ferre dma_writel(atdma, CHDR, 49523b5e3adSNicolas Ferre AT_DMA_RES(i) | atchan->mask); 496dc78baa2SNicolas Ferre /* Give information to tasklet */ 49753830cc7SNicolas Ferre set_bit(ATC_IS_ERROR, &atchan->status); 498dc78baa2SNicolas Ferre } 499dc78baa2SNicolas Ferre tasklet_schedule(&atchan->tasklet); 500dc78baa2SNicolas Ferre ret = IRQ_HANDLED; 501dc78baa2SNicolas Ferre } 502dc78baa2SNicolas Ferre } 503dc78baa2SNicolas Ferre 504dc78baa2SNicolas Ferre } while (pending); 505dc78baa2SNicolas Ferre 506dc78baa2SNicolas Ferre return ret; 507dc78baa2SNicolas Ferre } 508dc78baa2SNicolas Ferre 509dc78baa2SNicolas Ferre 510dc78baa2SNicolas Ferre /*-- DMA Engine API --------------------------------------------------*/ 511dc78baa2SNicolas Ferre 512dc78baa2SNicolas Ferre /** 513dc78baa2SNicolas Ferre * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine 514dc78baa2SNicolas Ferre * @desc: descriptor at the head of the transaction chain 515dc78baa2SNicolas Ferre * 516dc78baa2SNicolas Ferre * Queue chain if DMA engine is working already 517dc78baa2SNicolas Ferre * 518dc78baa2SNicolas Ferre * Cookie increment and adding to active_list or queue must be atomic 519dc78baa2SNicolas Ferre */ 520dc78baa2SNicolas Ferre static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) 521dc78baa2SNicolas Ferre { 522dc78baa2SNicolas Ferre struct at_desc *desc = txd_to_at_desc(tx); 523dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); 524dc78baa2SNicolas Ferre dma_cookie_t cookie; 525d8cb04b0SNicolas Ferre unsigned long flags; 526dc78baa2SNicolas Ferre 527d8cb04b0SNicolas Ferre spin_lock_irqsave(&atchan->lock, flags); 528884485e1SRussell King - ARM Linux cookie = dma_cookie_assign(tx); 529dc78baa2SNicolas Ferre 530dc78baa2SNicolas Ferre if (list_empty(&atchan->active_list)) { 531dc78baa2SNicolas Ferre dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 532dc78baa2SNicolas Ferre 
desc->txd.cookie); 533dc78baa2SNicolas Ferre atc_dostart(atchan, desc); 534dc78baa2SNicolas Ferre list_add_tail(&desc->desc_node, &atchan->active_list); 535dc78baa2SNicolas Ferre } else { 536dc78baa2SNicolas Ferre dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 537dc78baa2SNicolas Ferre desc->txd.cookie); 538dc78baa2SNicolas Ferre list_add_tail(&desc->desc_node, &atchan->queue); 539dc78baa2SNicolas Ferre } 540dc78baa2SNicolas Ferre 541d8cb04b0SNicolas Ferre spin_unlock_irqrestore(&atchan->lock, flags); 542dc78baa2SNicolas Ferre 543dc78baa2SNicolas Ferre return cookie; 544dc78baa2SNicolas Ferre } 545dc78baa2SNicolas Ferre 546dc78baa2SNicolas Ferre /** 547dc78baa2SNicolas Ferre * atc_prep_dma_memcpy - prepare a memcpy operation 548dc78baa2SNicolas Ferre * @chan: the channel to prepare operation on 549dc78baa2SNicolas Ferre * @dest: operation virtual destination address 550dc78baa2SNicolas Ferre * @src: operation virtual source address 551dc78baa2SNicolas Ferre * @len: operation length 552dc78baa2SNicolas Ferre * @flags: tx descriptor status flags 553dc78baa2SNicolas Ferre */ 554dc78baa2SNicolas Ferre static struct dma_async_tx_descriptor * 555dc78baa2SNicolas Ferre atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 556dc78baa2SNicolas Ferre size_t len, unsigned long flags) 557dc78baa2SNicolas Ferre { 558dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 559dc78baa2SNicolas Ferre struct at_desc *desc = NULL; 560dc78baa2SNicolas Ferre struct at_desc *first = NULL; 561dc78baa2SNicolas Ferre struct at_desc *prev = NULL; 562dc78baa2SNicolas Ferre size_t xfer_count; 563dc78baa2SNicolas Ferre size_t offset; 564dc78baa2SNicolas Ferre unsigned int src_width; 565dc78baa2SNicolas Ferre unsigned int dst_width; 566dc78baa2SNicolas Ferre u32 ctrla; 567dc78baa2SNicolas Ferre u32 ctrlb; 568dc78baa2SNicolas Ferre 569dc78baa2SNicolas Ferre dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", 570dc78baa2SNicolas 
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		/* everything word aligned: use 32-bit transfers */
		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		/* halfword aligned: use 16-bit transfers */
		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	/* split the copy into chunks of at most ATC_BTSIZE_MAX transfer units */
	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embedds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;
	first->tx_width = src_width;

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 *
 * Builds one linked-list entry per scatterlist element; returns the first
 * descriptor of the chain, or NULL on error.
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	/* common CTRLA: burst sizes taken from the runtime slave config */
	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* destination is the peripheral register: fixed address */
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |= ATC_DST_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			/* 32-bit memory accesses unless address/length force bytes */
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		/* source is the peripheral register: fixed address */
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |= ATC_SRC_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			/* 32-bit memory accesses unless address/length force bytes */
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(prev);

	/* First descriptor of the chain embedds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;
	first->tx_width = reg_width;

	/* first link descriptor of list is responsible of flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len)
{
	/* one period must fit in a single descriptor's transfer count */
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	/* period length and buffer address must be width-aligned */
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value: bursts, widths and transfer count */
	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where
the buffer starts 85453830cc7SNicolas Ferre * @buf_len: total number of bytes for the entire buffer 85553830cc7SNicolas Ferre * @period_len: number of bytes for each period 85653830cc7SNicolas Ferre * @direction: transfer direction, to or from device 857ec8b5e48SPeter Ujfalusi * @flags: tx descriptor status flags 858185ecb5fSAlexandre Bounine * @context: transfer context (ignored) 85953830cc7SNicolas Ferre */ 86053830cc7SNicolas Ferre static struct dma_async_tx_descriptor * 86153830cc7SNicolas Ferre atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 862185ecb5fSAlexandre Bounine size_t period_len, enum dma_transfer_direction direction, 863ec8b5e48SPeter Ujfalusi unsigned long flags, void *context) 86453830cc7SNicolas Ferre { 86553830cc7SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 86653830cc7SNicolas Ferre struct at_dma_slave *atslave = chan->private; 867beeaa103SNicolas Ferre struct dma_slave_config *sconfig = &atchan->dma_sconfig; 86853830cc7SNicolas Ferre struct at_desc *first = NULL; 86953830cc7SNicolas Ferre struct at_desc *prev = NULL; 87053830cc7SNicolas Ferre unsigned long was_cyclic; 871beeaa103SNicolas Ferre unsigned int reg_width; 87253830cc7SNicolas Ferre unsigned int periods = buf_len / period_len; 87353830cc7SNicolas Ferre unsigned int i; 87453830cc7SNicolas Ferre 87553830cc7SNicolas Ferre dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 876db8196dfSVinod Koul direction == DMA_MEM_TO_DEV ? 
"TO DEVICE" : "FROM DEVICE", 87753830cc7SNicolas Ferre buf_addr, 87853830cc7SNicolas Ferre periods, buf_len, period_len); 87953830cc7SNicolas Ferre 88053830cc7SNicolas Ferre if (unlikely(!atslave || !buf_len || !period_len)) { 88153830cc7SNicolas Ferre dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n"); 88253830cc7SNicolas Ferre return NULL; 88353830cc7SNicolas Ferre } 88453830cc7SNicolas Ferre 88553830cc7SNicolas Ferre was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status); 88653830cc7SNicolas Ferre if (was_cyclic) { 88753830cc7SNicolas Ferre dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n"); 88853830cc7SNicolas Ferre return NULL; 88953830cc7SNicolas Ferre } 89053830cc7SNicolas Ferre 8910e7264ccSAndy Shevchenko if (unlikely(!is_slave_direction(direction))) 8920e7264ccSAndy Shevchenko goto err_out; 8930e7264ccSAndy Shevchenko 894beeaa103SNicolas Ferre if (sconfig->direction == DMA_MEM_TO_DEV) 895beeaa103SNicolas Ferre reg_width = convert_buswidth(sconfig->dst_addr_width); 896beeaa103SNicolas Ferre else 897beeaa103SNicolas Ferre reg_width = convert_buswidth(sconfig->src_addr_width); 898beeaa103SNicolas Ferre 89953830cc7SNicolas Ferre /* Check for too big/unaligned periods and unaligned DMA buffer */ 9000e7264ccSAndy Shevchenko if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len)) 90153830cc7SNicolas Ferre goto err_out; 90253830cc7SNicolas Ferre 90353830cc7SNicolas Ferre /* build cyclic linked list */ 90453830cc7SNicolas Ferre for (i = 0; i < periods; i++) { 90553830cc7SNicolas Ferre struct at_desc *desc; 90653830cc7SNicolas Ferre 90753830cc7SNicolas Ferre desc = atc_desc_get(atchan); 90853830cc7SNicolas Ferre if (!desc) 90953830cc7SNicolas Ferre goto err_desc_get; 91053830cc7SNicolas Ferre 911beeaa103SNicolas Ferre if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, 912beeaa103SNicolas Ferre reg_width, period_len, direction)) 91353830cc7SNicolas Ferre goto err_desc_get; 91453830cc7SNicolas Ferre 91553830cc7SNicolas Ferre 
atc_desc_chain(&first, &prev, desc); 91653830cc7SNicolas Ferre } 91753830cc7SNicolas Ferre 91853830cc7SNicolas Ferre /* lets make a cyclic list */ 91953830cc7SNicolas Ferre prev->lli.dscr = first->txd.phys; 92053830cc7SNicolas Ferre 92153830cc7SNicolas Ferre /* First descriptor of the chain embedds additional information */ 92253830cc7SNicolas Ferre first->txd.cookie = -EBUSY; 92353830cc7SNicolas Ferre first->len = buf_len; 924d088c33bSElen Song first->tx_width = reg_width; 92553830cc7SNicolas Ferre 92653830cc7SNicolas Ferre return &first->txd; 92753830cc7SNicolas Ferre 92853830cc7SNicolas Ferre err_desc_get: 92953830cc7SNicolas Ferre dev_err(chan2dev(chan), "not enough descriptors available\n"); 93053830cc7SNicolas Ferre atc_desc_put(atchan, first); 93153830cc7SNicolas Ferre err_out: 93253830cc7SNicolas Ferre clear_bit(ATC_IS_CYCLIC, &atchan->status); 93353830cc7SNicolas Ferre return NULL; 93453830cc7SNicolas Ferre } 93553830cc7SNicolas Ferre 936beeaa103SNicolas Ferre static int set_runtime_config(struct dma_chan *chan, 937beeaa103SNicolas Ferre struct dma_slave_config *sconfig) 938beeaa103SNicolas Ferre { 939beeaa103SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 940beeaa103SNicolas Ferre 941beeaa103SNicolas Ferre /* Check if it is chan is configured for slave transfers */ 942beeaa103SNicolas Ferre if (!chan->private) 943beeaa103SNicolas Ferre return -EINVAL; 944beeaa103SNicolas Ferre 945beeaa103SNicolas Ferre memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig)); 946beeaa103SNicolas Ferre 947beeaa103SNicolas Ferre convert_burst(&atchan->dma_sconfig.src_maxburst); 948beeaa103SNicolas Ferre convert_burst(&atchan->dma_sconfig.dst_maxburst); 949beeaa103SNicolas Ferre 950beeaa103SNicolas Ferre return 0; 951beeaa103SNicolas Ferre } 952beeaa103SNicolas Ferre 95353830cc7SNicolas Ferre 95405827630SLinus Walleij static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 95505827630SLinus Walleij unsigned long arg) 956808347f6SNicolas Ferre { 
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	/* temporary list used to flush descriptors on DMA_TERMINATE_ALL */
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		/* suspend the channel via the CHER suspend bit */
		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		/* resuming a channel that is not paused is a no-op */
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 *
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		/* not complete yet: reap finished descriptors and retry */
		atc_cleanup_descriptors(atchan);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	spin_unlock_irqrestore(&atchan->lock, flags);

	/*
	 * Residue is reported as the first active descriptor's total length,
	 * i.e. at whole-transfer granularity.
	 * NOTE(review): atc_first_active() is called after the lock has been
	 * released - verify the active list cannot become empty here.
	 */
	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, atc_first_active(atchan)->len);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 * @client: current client requesting the channel be ready for requests
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			/* keep whatever we managed to allocate so far */
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
to_at_dma_chan(chan); 1170dc78baa2SNicolas Ferre struct at_dma *atdma = to_at_dma(chan->device); 1171dc78baa2SNicolas Ferre struct at_desc *desc, *_desc; 1172dc78baa2SNicolas Ferre LIST_HEAD(list); 1173dc78baa2SNicolas Ferre 1174dc78baa2SNicolas Ferre dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n", 1175dc78baa2SNicolas Ferre atchan->descs_allocated); 1176dc78baa2SNicolas Ferre 1177dc78baa2SNicolas Ferre /* ASSERT: channel is idle */ 1178dc78baa2SNicolas Ferre BUG_ON(!list_empty(&atchan->active_list)); 1179dc78baa2SNicolas Ferre BUG_ON(!list_empty(&atchan->queue)); 1180dc78baa2SNicolas Ferre BUG_ON(atc_chan_is_enabled(atchan)); 1181dc78baa2SNicolas Ferre 1182dc78baa2SNicolas Ferre list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 1183dc78baa2SNicolas Ferre dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); 1184dc78baa2SNicolas Ferre list_del(&desc->desc_node); 1185dc78baa2SNicolas Ferre /* free link descriptor */ 1186dc78baa2SNicolas Ferre dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); 1187dc78baa2SNicolas Ferre } 1188dc78baa2SNicolas Ferre list_splice_init(&atchan->free_list, &list); 1189dc78baa2SNicolas Ferre atchan->descs_allocated = 0; 119053830cc7SNicolas Ferre atchan->status = 0; 1191dc78baa2SNicolas Ferre 1192dc78baa2SNicolas Ferre dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1193dc78baa2SNicolas Ferre } 1194dc78baa2SNicolas Ferre 1195bbe89c8eSLudovic Desroches #ifdef CONFIG_OF 1196bbe89c8eSLudovic Desroches static bool at_dma_filter(struct dma_chan *chan, void *slave) 1197bbe89c8eSLudovic Desroches { 1198bbe89c8eSLudovic Desroches struct at_dma_slave *atslave = slave; 1199bbe89c8eSLudovic Desroches 1200bbe89c8eSLudovic Desroches if (atslave->dma_dev == chan->device->dev) { 1201bbe89c8eSLudovic Desroches chan->private = atslave; 1202bbe89c8eSLudovic Desroches return true; 1203bbe89c8eSLudovic Desroches } else { 1204bbe89c8eSLudovic Desroches return false; 1205bbe89c8eSLudovic 
Desroches } 1206bbe89c8eSLudovic Desroches } 1207bbe89c8eSLudovic Desroches 1208bbe89c8eSLudovic Desroches static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, 1209bbe89c8eSLudovic Desroches struct of_dma *of_dma) 1210bbe89c8eSLudovic Desroches { 1211bbe89c8eSLudovic Desroches struct dma_chan *chan; 1212bbe89c8eSLudovic Desroches struct at_dma_chan *atchan; 1213bbe89c8eSLudovic Desroches struct at_dma_slave *atslave; 1214bbe89c8eSLudovic Desroches dma_cap_mask_t mask; 1215bbe89c8eSLudovic Desroches unsigned int per_id; 1216bbe89c8eSLudovic Desroches struct platform_device *dmac_pdev; 1217bbe89c8eSLudovic Desroches 1218bbe89c8eSLudovic Desroches if (dma_spec->args_count != 2) 1219bbe89c8eSLudovic Desroches return NULL; 1220bbe89c8eSLudovic Desroches 1221bbe89c8eSLudovic Desroches dmac_pdev = of_find_device_by_node(dma_spec->np); 1222bbe89c8eSLudovic Desroches 1223bbe89c8eSLudovic Desroches dma_cap_zero(mask); 1224bbe89c8eSLudovic Desroches dma_cap_set(DMA_SLAVE, mask); 1225bbe89c8eSLudovic Desroches 1226bbe89c8eSLudovic Desroches atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); 1227bbe89c8eSLudovic Desroches if (!atslave) 1228bbe89c8eSLudovic Desroches return NULL; 1229bbe89c8eSLudovic Desroches /* 1230bbe89c8eSLudovic Desroches * We can fill both SRC_PER and DST_PER, one of these fields will be 1231bbe89c8eSLudovic Desroches * ignored depending on DMA transfer direction. 
1232bbe89c8eSLudovic Desroches */ 1233bbe89c8eSLudovic Desroches per_id = dma_spec->args[1]; 12346c22770fSNicolas Ferre atslave->cfg = ATC_FIFOCFG_HALFFIFO 12356c22770fSNicolas Ferre | ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW 12366c22770fSNicolas Ferre | ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) 12376c22770fSNicolas Ferre | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); 1238bbe89c8eSLudovic Desroches atslave->dma_dev = &dmac_pdev->dev; 1239bbe89c8eSLudovic Desroches 1240bbe89c8eSLudovic Desroches chan = dma_request_channel(mask, at_dma_filter, atslave); 1241bbe89c8eSLudovic Desroches if (!chan) 1242bbe89c8eSLudovic Desroches return NULL; 1243bbe89c8eSLudovic Desroches 1244bbe89c8eSLudovic Desroches atchan = to_at_dma_chan(chan); 1245bbe89c8eSLudovic Desroches atchan->per_if = dma_spec->args[0] & 0xff; 1246bbe89c8eSLudovic Desroches atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff; 1247bbe89c8eSLudovic Desroches 1248bbe89c8eSLudovic Desroches return chan; 1249bbe89c8eSLudovic Desroches } 1250bbe89c8eSLudovic Desroches #else 1251bbe89c8eSLudovic Desroches static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, 1252bbe89c8eSLudovic Desroches struct of_dma *of_dma) 1253bbe89c8eSLudovic Desroches { 1254bbe89c8eSLudovic Desroches return NULL; 1255bbe89c8eSLudovic Desroches } 1256bbe89c8eSLudovic Desroches #endif 1257dc78baa2SNicolas Ferre 1258dc78baa2SNicolas Ferre /*-- Module Management -----------------------------------------------*/ 1259dc78baa2SNicolas Ferre 126002f88be9SNicolas Ferre /* cap_mask is a multi-u32 bitfield, fill it with proper C code. 
 */
/* Per-SoC platform data; cap_mask is completed at probe time. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
/* Device-tree match table, mapping compatibles to per-SoC platform data. */
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

/* Legacy (non-DT) platform device id table, same per-SoC data. */
static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

/*
 * at_dma_get_driver_data - select the platform data for this device
 * @pdev: platform device being probed
 *
 * Uses the OF match table when a DT node is present, otherwise falls
 * back to the platform device id table. Returns NULL only when a DT
 * node exists but matches no entry.
 */
static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDAMC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

/*
 * at_dma_probe - set up the controller: map registers, enable the clock,
 * initialize every channel and register with the dmaengine core.
 * Resources are released in reverse order via the goto labels at the end.
 */
static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* one struct at_dma followed by one at_dma_chan per channel */
	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		/* default AHB interfaces; may be overridden by at_dma_xlate() */
		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		/* per-channel register window inside the controller's map */
		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		/* descriptor completion is deferred to a per-channel tasklet */
		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	/* global enable; individual channels stay idle until issued */
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

	/* unwind in reverse acquisition order */
err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->dma_desc_pool);
err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

/*
 * at_dma_remove - tear down the controller: unregister from dmaengine,
 * quiesce and detach every channel, then release clock, mapping, memory
 * region and the device structure (reverse of at_dma_probe()).
 */
static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

/* at_dma_shutdown - stop all DMA activity and gate the clock at shutdown. */
static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

/*
 * at_dma_prepare - PM prepare callback
 *
 * Refuse to suspend (-EAGAIN) while any non-cyclic channel still has a
 * transfer in flight; cyclic channels are handled by atc_suspend_cyclic().
 */
static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

/*
 * atc_suspend_cyclic - pause a cyclic channel (if the user did not) and
 * save the state needed to resume the descriptor ring after suspend.
 */
static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* Channel should be paused by user
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

/*
 * at_dma_suspend_noirq - save per-channel CFG (and cyclic state) plus the
 * interrupt mask, then switch the controller and its clock off.
 */
static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

/*
 * atc_resume_cyclic - re-arm a cyclic channel from the DSCR saved at
 * suspend time and re-enable it. The channel stays paused if the user
 * paused it; unpausing is left to the channel user.
 */
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

/*
 * at_dma_resume_noirq - re-enable clock and controller, then restore the
 * interrupt mask and each channel's CFG (and cyclic state) saved at
 * suspend time.
 */
static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

/* .probe is passed separately to platform_driver_probe() in at_dma_init() */
static struct platform_driver at_dma_driver = {
	.remove		= at_dma_remove,
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
/* early init so client drivers can find their channels at probe time */
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");