1dc78baa2SNicolas Ferre /* 2dc78baa2SNicolas Ferre * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) 3dc78baa2SNicolas Ferre * 4dc78baa2SNicolas Ferre * Copyright (C) 2008 Atmel Corporation 5dc78baa2SNicolas Ferre * 6dc78baa2SNicolas Ferre * This program is free software; you can redistribute it and/or modify 7dc78baa2SNicolas Ferre * it under the terms of the GNU General Public License as published by 8dc78baa2SNicolas Ferre * the Free Software Foundation; either version 2 of the License, or 9dc78baa2SNicolas Ferre * (at your option) any later version. 10dc78baa2SNicolas Ferre * 11dc78baa2SNicolas Ferre * 12dc78baa2SNicolas Ferre * This supports the Atmel AHB DMA Controller, 13dc78baa2SNicolas Ferre * 14dc78baa2SNicolas Ferre * The driver has currently been tested with the Atmel AT91SAM9RL 15dc78baa2SNicolas Ferre * and AT91SAM9G45 series. 16dc78baa2SNicolas Ferre */ 17dc78baa2SNicolas Ferre 18dc78baa2SNicolas Ferre #include <linux/clk.h> 19dc78baa2SNicolas Ferre #include <linux/dmaengine.h> 20dc78baa2SNicolas Ferre #include <linux/dma-mapping.h> 21dc78baa2SNicolas Ferre #include <linux/dmapool.h> 22dc78baa2SNicolas Ferre #include <linux/interrupt.h> 23dc78baa2SNicolas Ferre #include <linux/module.h> 24dc78baa2SNicolas Ferre #include <linux/platform_device.h> 255a0e3ad6STejun Heo #include <linux/slab.h> 26dc78baa2SNicolas Ferre 27dc78baa2SNicolas Ferre #include "at_hdmac_regs.h" 28dc78baa2SNicolas Ferre 29dc78baa2SNicolas Ferre /* 30dc78baa2SNicolas Ferre * Glossary 31dc78baa2SNicolas Ferre * -------- 32dc78baa2SNicolas Ferre * 33dc78baa2SNicolas Ferre * at_hdmac : Name of the ATmel AHB DMA Controller 34dc78baa2SNicolas Ferre * at_dma_ / atdma : ATmel DMA controller entity related 35dc78baa2SNicolas Ferre * atc_ / atchan : ATmel DMA Channel entity related 36dc78baa2SNicolas Ferre */ 37dc78baa2SNicolas Ferre 38dc78baa2SNicolas Ferre #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) 39dc78baa2SNicolas Ferre #define ATC_DEFAULT_CTRLA (0) 
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(0)	\
				|ATC_DIF(1))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

/* Return the oldest descriptor on the channel's active list.
 * Caller must know the list is non-empty (list_first_entry does not
 * check for an empty list). */
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

/* Return the oldest descriptor waiting on the channel's submit queue.
 * Caller must know the queue is non-empty. */
static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 *
 * Returns NULL when the DMA pool is exhausted.
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	/* descriptors are DMA-mapped: hardware reads them via desc->txd.phys */
	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 *
 * Scans the free list for an ACKed descriptor; if none is found, falls
 * back to allocating a fresh one with GFP_ATOMIC (callers may hold locks
 * or run in tasklet context). Returns NULL only when allocation fails.
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_bh(&atchan->lock);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		/* only descriptors the client has ACKed may be reused */
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_bh(&atchan->lock);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&atchan->lock);
			atchan->descs_allocated++;
			spin_unlock_bh(&atchan->lock);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 *
 * Safe to call with @desc == NULL (no-op). Takes atchan->lock itself,
 * so must NOT be called with that lock already held.
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;

		spin_lock_bh(&atchan->lock);
		/* log each chained child before splicing them back in bulk */
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_bh(&atchan->lock);
	}
}

/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_cookie_t cookie = atchan->chan_common.cookie;

	/* dma_cookie_t is signed: on overflow wrap back to 1, since zero
	 * and negative cookies are reserved for error reporting */
	if (++cookie < 0)
		cookie = 1;

	atchan->chan_common.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with
 * atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* zero the per-channel registers and program only the chain pointer
	 * (DSCR); transfer parameters are taken from the first hardware
	 * descriptor at first->txd.phys, then enable the channel */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_async_tx_callback	callback;
	void			*param;
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	atchan->completed_cookie = txd->cookie;
	/* snapshot callback info before the descriptor goes back on the
	 * free list and may be reused */
	callback = txd->callback;
	param = txd->callback_param;

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now that it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	/* run callbacks / unmapping on a private list so completion work
	 * cannot race with the freshly restarted transfer */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	/* descriptors complete in order: stop at the first chain whose
	 * hardware ATC_DONE bit is not yet set */
	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	/* zero or one active chain: complete it and start whatever is
	 * queued; otherwise retire the head and kick the next chain */
	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the
	 * active list is broken. Since we don't have any way to report
	 * errors, we'll just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}


/*--  IRQ & Tasklet  ---------------------------------------------------*/

/* Bottom half: runs completion/error work scheduled by the interrupt
 * handler, outside of hard-IRQ context. */
static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;

	/* Channel cannot be enabled here */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: channel enabled in tasklet\n");
		return;
	}

	spin_lock(&atchan->lock);
	/* error_status bit 0 is set by at_dma_interrupt() on AHB error */
	if (test_and_clear_bit(0, &atchan->error_status))
		atc_handle_error(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock(&atchan->lock);
}

/* Shared interrupt handler for the whole controller: dispatches pending
 * chained-buffer-complete / error events to the per-channel tasklets. */
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	/* loop until no enabled interrupt source remains asserted
	 * (reading EBCISR clears the status bits) */
	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR, atchan->mask);
					/* Give information to tasklet */
					set_bit(0, &atchan->error_status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @desc: descriptor at the head of the
 * transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&atchan->lock);
	cookie = atc_assign_cookie(atchan, desc);

	/* start immediately if the channel is idle, otherwise park the
	 * chain on the queue; it is started later from atc_complete_all()
	 * or atc_handle_error() */
	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_bh(&atchan->lock);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 *
 * Returns the head descriptor of the chain, or NULL when @len is zero
 * or no descriptor can be obtained.
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	/* pick the widest bus width (word/halfword/byte, width code
	 * doubles as a shift count) common to src, dest and len alignment */
	if (!((src | dest  | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	/* split the copy into hardware descriptors of at most
	 * ATC_BTSIZE_MAX transfer units each */
	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		async_tx_ack(&desc->txd);

		if (!first) {
			first = desc;
		} else {
			/* inform the HW lli about chaining */
			prev->lli.dscr = desc->txd.phys;
			/* insert the link descriptor to the LD ring */
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	desc->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	/* release the partially built chain back to the free list */
	atc_desc_put(atchan, first);
	return NULL;
}


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
639808347f6SNicolas Ferre unsigned int i; 640808347f6SNicolas Ferre struct scatterlist *sg; 641808347f6SNicolas Ferre size_t total_len = 0; 642808347f6SNicolas Ferre 643808347f6SNicolas Ferre dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n", 644808347f6SNicolas Ferre direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", 645808347f6SNicolas Ferre flags); 646808347f6SNicolas Ferre 647808347f6SNicolas Ferre if (unlikely(!atslave || !sg_len)) { 648808347f6SNicolas Ferre dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 649808347f6SNicolas Ferre return NULL; 650808347f6SNicolas Ferre } 651808347f6SNicolas Ferre 652808347f6SNicolas Ferre reg_width = atslave->reg_width; 653808347f6SNicolas Ferre 654808347f6SNicolas Ferre ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; 655808347f6SNicolas Ferre ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN; 656808347f6SNicolas Ferre 657808347f6SNicolas Ferre switch (direction) { 658808347f6SNicolas Ferre case DMA_TO_DEVICE: 659808347f6SNicolas Ferre ctrla |= ATC_DST_WIDTH(reg_width); 660808347f6SNicolas Ferre ctrlb |= ATC_DST_ADDR_MODE_FIXED 661808347f6SNicolas Ferre | ATC_SRC_ADDR_MODE_INCR 662808347f6SNicolas Ferre | ATC_FC_MEM2PER; 663808347f6SNicolas Ferre reg = atslave->tx_reg; 664808347f6SNicolas Ferre for_each_sg(sgl, sg, sg_len, i) { 665808347f6SNicolas Ferre struct at_desc *desc; 666808347f6SNicolas Ferre u32 len; 667808347f6SNicolas Ferre u32 mem; 668808347f6SNicolas Ferre 669808347f6SNicolas Ferre desc = atc_desc_get(atchan); 670808347f6SNicolas Ferre if (!desc) 671808347f6SNicolas Ferre goto err_desc_get; 672808347f6SNicolas Ferre 6730f70e8ceSNicolas Ferre mem = sg_dma_address(sg); 674808347f6SNicolas Ferre len = sg_dma_len(sg); 675808347f6SNicolas Ferre mem_width = 2; 676808347f6SNicolas Ferre if (unlikely(mem & 3 || len & 3)) 677808347f6SNicolas Ferre mem_width = 0; 678808347f6SNicolas Ferre 679808347f6SNicolas Ferre desc->lli.saddr = mem; 680808347f6SNicolas Ferre desc->lli.daddr = reg; 681808347f6SNicolas Ferre 
desc->lli.ctrla = ctrla 682808347f6SNicolas Ferre | ATC_SRC_WIDTH(mem_width) 683808347f6SNicolas Ferre | len >> mem_width; 684808347f6SNicolas Ferre desc->lli.ctrlb = ctrlb; 685808347f6SNicolas Ferre 686808347f6SNicolas Ferre if (!first) { 687808347f6SNicolas Ferre first = desc; 688808347f6SNicolas Ferre } else { 689808347f6SNicolas Ferre /* inform the HW lli about chaining */ 690808347f6SNicolas Ferre prev->lli.dscr = desc->txd.phys; 691808347f6SNicolas Ferre /* insert the link descriptor to the LD ring */ 692808347f6SNicolas Ferre list_add_tail(&desc->desc_node, 693285a3c71SDan Williams &first->tx_list); 694808347f6SNicolas Ferre } 695808347f6SNicolas Ferre prev = desc; 696808347f6SNicolas Ferre total_len += len; 697808347f6SNicolas Ferre } 698808347f6SNicolas Ferre break; 699808347f6SNicolas Ferre case DMA_FROM_DEVICE: 700808347f6SNicolas Ferre ctrla |= ATC_SRC_WIDTH(reg_width); 701808347f6SNicolas Ferre ctrlb |= ATC_DST_ADDR_MODE_INCR 702808347f6SNicolas Ferre | ATC_SRC_ADDR_MODE_FIXED 703808347f6SNicolas Ferre | ATC_FC_PER2MEM; 704808347f6SNicolas Ferre 705808347f6SNicolas Ferre reg = atslave->rx_reg; 706808347f6SNicolas Ferre for_each_sg(sgl, sg, sg_len, i) { 707808347f6SNicolas Ferre struct at_desc *desc; 708808347f6SNicolas Ferre u32 len; 709808347f6SNicolas Ferre u32 mem; 710808347f6SNicolas Ferre 711808347f6SNicolas Ferre desc = atc_desc_get(atchan); 712808347f6SNicolas Ferre if (!desc) 713808347f6SNicolas Ferre goto err_desc_get; 714808347f6SNicolas Ferre 7150f70e8ceSNicolas Ferre mem = sg_dma_address(sg); 716808347f6SNicolas Ferre len = sg_dma_len(sg); 717808347f6SNicolas Ferre mem_width = 2; 718808347f6SNicolas Ferre if (unlikely(mem & 3 || len & 3)) 719808347f6SNicolas Ferre mem_width = 0; 720808347f6SNicolas Ferre 721808347f6SNicolas Ferre desc->lli.saddr = reg; 722808347f6SNicolas Ferre desc->lli.daddr = mem; 723808347f6SNicolas Ferre desc->lli.ctrla = ctrla 724808347f6SNicolas Ferre | ATC_DST_WIDTH(mem_width) 725808347f6SNicolas Ferre | len >> 
mem_width; 726808347f6SNicolas Ferre desc->lli.ctrlb = ctrlb; 727808347f6SNicolas Ferre 728808347f6SNicolas Ferre if (!first) { 729808347f6SNicolas Ferre first = desc; 730808347f6SNicolas Ferre } else { 731808347f6SNicolas Ferre /* inform the HW lli about chaining */ 732808347f6SNicolas Ferre prev->lli.dscr = desc->txd.phys; 733808347f6SNicolas Ferre /* insert the link descriptor to the LD ring */ 734808347f6SNicolas Ferre list_add_tail(&desc->desc_node, 735285a3c71SDan Williams &first->tx_list); 736808347f6SNicolas Ferre } 737808347f6SNicolas Ferre prev = desc; 738808347f6SNicolas Ferre total_len += len; 739808347f6SNicolas Ferre } 740808347f6SNicolas Ferre break; 741808347f6SNicolas Ferre default: 742808347f6SNicolas Ferre return NULL; 743808347f6SNicolas Ferre } 744808347f6SNicolas Ferre 745808347f6SNicolas Ferre /* set end-of-link to the last link descriptor of list*/ 746808347f6SNicolas Ferre set_desc_eol(prev); 747808347f6SNicolas Ferre 748808347f6SNicolas Ferre /* First descriptor of the chain embedds additional information */ 749808347f6SNicolas Ferre first->txd.cookie = -EBUSY; 750808347f6SNicolas Ferre first->len = total_len; 751808347f6SNicolas Ferre 752808347f6SNicolas Ferre /* last link descriptor of list is responsible of flags */ 753808347f6SNicolas Ferre prev->txd.flags = flags; /* client is in control of this ack */ 754808347f6SNicolas Ferre 755808347f6SNicolas Ferre return &first->txd; 756808347f6SNicolas Ferre 757808347f6SNicolas Ferre err_desc_get: 758808347f6SNicolas Ferre dev_err(chan2dev(chan), "not enough descriptors available\n"); 759808347f6SNicolas Ferre atc_desc_put(atchan, first); 760808347f6SNicolas Ferre return NULL; 761808347f6SNicolas Ferre } 762808347f6SNicolas Ferre 76305827630SLinus Walleij static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 76405827630SLinus Walleij unsigned long arg) 765808347f6SNicolas Ferre { 766808347f6SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 767808347f6SNicolas 
Ferre struct at_dma *atdma = to_at_dma(chan->device); 768808347f6SNicolas Ferre struct at_desc *desc, *_desc; 769808347f6SNicolas Ferre LIST_HEAD(list); 770808347f6SNicolas Ferre 771c3635c78SLinus Walleij /* Only supports DMA_TERMINATE_ALL */ 772c3635c78SLinus Walleij if (cmd != DMA_TERMINATE_ALL) 773c3635c78SLinus Walleij return -ENXIO; 774c3635c78SLinus Walleij 775808347f6SNicolas Ferre /* 776808347f6SNicolas Ferre * This is only called when something went wrong elsewhere, so 777808347f6SNicolas Ferre * we don't really care about the data. Just disable the 778808347f6SNicolas Ferre * channel. We still have to poll the channel enable bit due 779808347f6SNicolas Ferre * to AHB/HSB limitations. 780808347f6SNicolas Ferre */ 781808347f6SNicolas Ferre spin_lock_bh(&atchan->lock); 782808347f6SNicolas Ferre 783808347f6SNicolas Ferre dma_writel(atdma, CHDR, atchan->mask); 784808347f6SNicolas Ferre 785808347f6SNicolas Ferre /* confirm that this channel is disabled */ 786808347f6SNicolas Ferre while (dma_readl(atdma, CHSR) & atchan->mask) 787808347f6SNicolas Ferre cpu_relax(); 788808347f6SNicolas Ferre 789808347f6SNicolas Ferre /* active_list entries will end up before queued entries */ 790808347f6SNicolas Ferre list_splice_init(&atchan->queue, &list); 791808347f6SNicolas Ferre list_splice_init(&atchan->active_list, &list); 792808347f6SNicolas Ferre 793808347f6SNicolas Ferre /* Flush all pending and queued descriptors */ 794808347f6SNicolas Ferre list_for_each_entry_safe(desc, _desc, &list, desc_node) 795808347f6SNicolas Ferre atc_chain_complete(atchan, desc); 796c3635c78SLinus Walleij 797b0ebeb9cSYong Wang spin_unlock_bh(&atchan->lock); 798b0ebeb9cSYong Wang 799c3635c78SLinus Walleij return 0; 800808347f6SNicolas Ferre } 801808347f6SNicolas Ferre 802dc78baa2SNicolas Ferre /** 80307934481SLinus Walleij * atc_tx_status - poll for transaction completion 804dc78baa2SNicolas Ferre * @chan: DMA channel 805dc78baa2SNicolas Ferre * @cookie: transaction identifier to check status 
of 80607934481SLinus Walleij * @txstate: if not %NULL updated with transaction state 807dc78baa2SNicolas Ferre * 80807934481SLinus Walleij * If @txstate is passed in, upon return it reflect the driver 809dc78baa2SNicolas Ferre * internal state and can be used with dma_async_is_complete() to check 810dc78baa2SNicolas Ferre * the status of multiple cookies without re-checking hardware state. 811dc78baa2SNicolas Ferre */ 812dc78baa2SNicolas Ferre static enum dma_status 81307934481SLinus Walleij atc_tx_status(struct dma_chan *chan, 814dc78baa2SNicolas Ferre dma_cookie_t cookie, 81507934481SLinus Walleij struct dma_tx_state *txstate) 816dc78baa2SNicolas Ferre { 817dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 818dc78baa2SNicolas Ferre dma_cookie_t last_used; 819dc78baa2SNicolas Ferre dma_cookie_t last_complete; 820dc78baa2SNicolas Ferre enum dma_status ret; 821dc78baa2SNicolas Ferre 8224297a462SNicolas Ferre spin_lock_bh(&atchan->lock); 823dc78baa2SNicolas Ferre 824dc78baa2SNicolas Ferre last_complete = atchan->completed_cookie; 825dc78baa2SNicolas Ferre last_used = chan->cookie; 826dc78baa2SNicolas Ferre 827dc78baa2SNicolas Ferre ret = dma_async_is_complete(cookie, last_complete, last_used); 828dc78baa2SNicolas Ferre if (ret != DMA_SUCCESS) { 829dc78baa2SNicolas Ferre atc_cleanup_descriptors(atchan); 830dc78baa2SNicolas Ferre 831dc78baa2SNicolas Ferre last_complete = atchan->completed_cookie; 832dc78baa2SNicolas Ferre last_used = chan->cookie; 833dc78baa2SNicolas Ferre 834dc78baa2SNicolas Ferre ret = dma_async_is_complete(cookie, last_complete, last_used); 835dc78baa2SNicolas Ferre } 836dc78baa2SNicolas Ferre 8374297a462SNicolas Ferre spin_unlock_bh(&atchan->lock); 838dc78baa2SNicolas Ferre 839bca34692SDan Williams dma_set_tx_state(txstate, last_complete, last_used, 0); 84007934481SLinus Walleij dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n", 84107934481SLinus Walleij cookie, last_complete ? 
last_complete : 0, 84207934481SLinus Walleij last_used ? last_used : 0); 843dc78baa2SNicolas Ferre 844dc78baa2SNicolas Ferre return ret; 845dc78baa2SNicolas Ferre } 846dc78baa2SNicolas Ferre 847dc78baa2SNicolas Ferre /** 848dc78baa2SNicolas Ferre * atc_issue_pending - try to finish work 849dc78baa2SNicolas Ferre * @chan: target DMA channel 850dc78baa2SNicolas Ferre */ 851dc78baa2SNicolas Ferre static void atc_issue_pending(struct dma_chan *chan) 852dc78baa2SNicolas Ferre { 853dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 854dc78baa2SNicolas Ferre 855dc78baa2SNicolas Ferre dev_vdbg(chan2dev(chan), "issue_pending\n"); 856dc78baa2SNicolas Ferre 857dc78baa2SNicolas Ferre if (!atc_chan_is_enabled(atchan)) { 858dc78baa2SNicolas Ferre spin_lock_bh(&atchan->lock); 859dc78baa2SNicolas Ferre atc_advance_work(atchan); 860dc78baa2SNicolas Ferre spin_unlock_bh(&atchan->lock); 861dc78baa2SNicolas Ferre } 862dc78baa2SNicolas Ferre } 863dc78baa2SNicolas Ferre 864dc78baa2SNicolas Ferre /** 865dc78baa2SNicolas Ferre * atc_alloc_chan_resources - allocate resources for DMA channel 866dc78baa2SNicolas Ferre * @chan: allocate descriptor resources for this channel 867dc78baa2SNicolas Ferre * @client: current client requesting the channel be ready for requests 868dc78baa2SNicolas Ferre * 869dc78baa2SNicolas Ferre * return - the number of allocated descriptors 870dc78baa2SNicolas Ferre */ 871dc78baa2SNicolas Ferre static int atc_alloc_chan_resources(struct dma_chan *chan) 872dc78baa2SNicolas Ferre { 873dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 874dc78baa2SNicolas Ferre struct at_dma *atdma = to_at_dma(chan->device); 875dc78baa2SNicolas Ferre struct at_desc *desc; 876808347f6SNicolas Ferre struct at_dma_slave *atslave; 877dc78baa2SNicolas Ferre int i; 878808347f6SNicolas Ferre u32 cfg; 879dc78baa2SNicolas Ferre LIST_HEAD(tmp_list); 880dc78baa2SNicolas Ferre 881dc78baa2SNicolas Ferre dev_vdbg(chan2dev(chan), 
"alloc_chan_resources\n"); 882dc78baa2SNicolas Ferre 883dc78baa2SNicolas Ferre /* ASSERT: channel is idle */ 884dc78baa2SNicolas Ferre if (atc_chan_is_enabled(atchan)) { 885dc78baa2SNicolas Ferre dev_dbg(chan2dev(chan), "DMA channel not idle ?\n"); 886dc78baa2SNicolas Ferre return -EIO; 887dc78baa2SNicolas Ferre } 888dc78baa2SNicolas Ferre 889808347f6SNicolas Ferre cfg = ATC_DEFAULT_CFG; 890808347f6SNicolas Ferre 891808347f6SNicolas Ferre atslave = chan->private; 892808347f6SNicolas Ferre if (atslave) { 893808347f6SNicolas Ferre /* 894808347f6SNicolas Ferre * We need controller-specific data to set up slave 895808347f6SNicolas Ferre * transfers. 896808347f6SNicolas Ferre */ 897808347f6SNicolas Ferre BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); 898808347f6SNicolas Ferre 899808347f6SNicolas Ferre /* if cfg configuration specified take it instad of default */ 900808347f6SNicolas Ferre if (atslave->cfg) 901808347f6SNicolas Ferre cfg = atslave->cfg; 902808347f6SNicolas Ferre } 903808347f6SNicolas Ferre 904808347f6SNicolas Ferre /* have we already been set up? 
905808347f6SNicolas Ferre * reconfigure channel but no need to reallocate descriptors */ 906dc78baa2SNicolas Ferre if (!list_empty(&atchan->free_list)) 907dc78baa2SNicolas Ferre return atchan->descs_allocated; 908dc78baa2SNicolas Ferre 909dc78baa2SNicolas Ferre /* Allocate initial pool of descriptors */ 910dc78baa2SNicolas Ferre for (i = 0; i < init_nr_desc_per_channel; i++) { 911dc78baa2SNicolas Ferre desc = atc_alloc_descriptor(chan, GFP_KERNEL); 912dc78baa2SNicolas Ferre if (!desc) { 913dc78baa2SNicolas Ferre dev_err(atdma->dma_common.dev, 914dc78baa2SNicolas Ferre "Only %d initial descriptors\n", i); 915dc78baa2SNicolas Ferre break; 916dc78baa2SNicolas Ferre } 917dc78baa2SNicolas Ferre list_add_tail(&desc->desc_node, &tmp_list); 918dc78baa2SNicolas Ferre } 919dc78baa2SNicolas Ferre 920dc78baa2SNicolas Ferre spin_lock_bh(&atchan->lock); 921dc78baa2SNicolas Ferre atchan->descs_allocated = i; 922dc78baa2SNicolas Ferre list_splice(&tmp_list, &atchan->free_list); 923dc78baa2SNicolas Ferre atchan->completed_cookie = chan->cookie = 1; 924dc78baa2SNicolas Ferre spin_unlock_bh(&atchan->lock); 925dc78baa2SNicolas Ferre 926dc78baa2SNicolas Ferre /* channel parameters */ 927808347f6SNicolas Ferre channel_writel(atchan, CFG, cfg); 928dc78baa2SNicolas Ferre 929dc78baa2SNicolas Ferre dev_dbg(chan2dev(chan), 930dc78baa2SNicolas Ferre "alloc_chan_resources: allocated %d descriptors\n", 931dc78baa2SNicolas Ferre atchan->descs_allocated); 932dc78baa2SNicolas Ferre 933dc78baa2SNicolas Ferre return atchan->descs_allocated; 934dc78baa2SNicolas Ferre } 935dc78baa2SNicolas Ferre 936dc78baa2SNicolas Ferre /** 937dc78baa2SNicolas Ferre * atc_free_chan_resources - free all channel resources 938dc78baa2SNicolas Ferre * @chan: DMA channel 939dc78baa2SNicolas Ferre */ 940dc78baa2SNicolas Ferre static void atc_free_chan_resources(struct dma_chan *chan) 941dc78baa2SNicolas Ferre { 942dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 943dc78baa2SNicolas Ferre struct 
/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 *
 * Returns every descriptor of the channel's free list to the DMA pool.
 * The channel must already be idle: active list, queue and the hardware
 * enable bit are all asserted empty/clear.
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*--  Module Management  -----------------------------------------------*/

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDAMC device
 *
 * Clears the global enable, masks all interrupts, then polls CHSR until
 * every channel reports disabled.
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}
/*
 * at_dma_probe - set up the DMA controller from platform data.
 *
 * Acquires MMIO region, clock and IRQ, creates the descriptor DMA pool,
 * initializes per-channel state, fills in the dmaengine ops according to
 * the advertised capabilities, and registers the device.  Error paths
 * unwind in strict reverse order of acquisition via the goto ladder.
 */
static int __init at_dma_probe(struct platform_device *pdev)
{
	struct at_dma_platform_data *pdata;
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	/* get DMA Controller parameters from platform */
	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* controller struct has the channel array appended to it */
	size = sizeof(struct at_dma);
	size += pdata->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities from the platform data */
	atdma->dma_common.cap_mask = pdata->cap_mask;
	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;

	size = io->end - io->start + 1;
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		/* cookies start at 1; 0 and negatives are reserved */
		atchan->chan_common.cookie = atchan->completed_cookie = 1;
		atchan->chan_common.chan_id = i;
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_irq(atchan);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  atdma->dma_common.chancnt);

	dma_async_device_register(&atdma->dma_common);

	return 0;

	/* unwind in reverse order of acquisition */
err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}
/*
 * at_dma_remove - tear down the controller; mirrors at_dma_probe in
 * reverse: stop hardware, unregister, destroy pool, free IRQ, quiesce
 * each channel's tasklet, then release clock, MMIO and memory.
 */
static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_irq(atchan);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, io->end - io->start + 1);

	kfree(atdma);

	return 0;
}

/* Stop the hardware and gate its clock on system shutdown. */
static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

/* noirq suspend: quiesce the controller and gate its clock. */
static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
	return 0;
}

/* noirq resume: ungate the clock and re-enable the controller. */
static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

/* No .probe here: the driver is registered with platform_driver_probe()
 * so the __init probe routine can be discarded after boot. */
static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
module_init(at_dma_init);

/* Module unload: unregister the platform driver. */
static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");