/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */

#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the ATmel AHB DMA Controller
 * at_dma_ / atdma	: ATmel DMA controller entity related
 * atc_ / atchan	: ATmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))
#define ATC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);


/*----------------------------------------------------------------------*/

/*
 * atc_get_xfer_width - pick the widest transfer width usable for a copy
 * @src: physical source address
 * @dst: physical destination address
 * @len: transfer length in bytes
 *
 * Returns the width encoded as log2 of the byte width: 2 when src, dst
 * and len are all 4-byte aligned, 1 when all are 2-byte aligned,
 * 0 (byte transfers) otherwise.
 */
static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
						size_t len)
{
	unsigned int width;

	if (!((src | dst | len) & 3))
		width = 2;
	else if (!((src | dst | len) & 1))
		width = 1;
	else
		width = 0;

	return width;
}

/* first descriptor on the channel's active (running) list */
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

/* first descriptor on the channel's pending queue */
static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	/* descriptors come from the DMA-coherent pool; phys is the bus
	 * address the controller will use to fetch the hardware LLI */
	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 *
 * Scans the free list for an ACKed descriptor; if none is found,
 * a fresh one is allocated with GFP_ATOMIC (may be called from
 * atomic context). Returns NULL when allocation fails.
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	/* NOTE(review): tmp_list is declared but never used in this
	 * function — candidate for removal */
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		/* children first, then the head descriptor itself */
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			" channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* zero the per-channel registers; the real parameters are fetched
	 * by the controller from the hardware LLI pointed at by DSCR */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
		       ATC_SPIP_BOUNDARY(first->boundary));
	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
		       ATC_DPIP_BOUNDARY(first->boundary));
	/* enable the channel only after everything else is programmed */
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/*
 * atc_get_desc_by_cookie - get the descriptor of a cookie
 * @atchan: the DMA channel
 * @cookie: the cookie to get the descriptor for
 *
 * Searches the pending queue first, then the active list.
 * Returns NULL if the cookie matches no known descriptor.
 */
static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
						dma_cookie_t cookie)
{
	struct at_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	return NULL;
}

/**
 * atc_calc_bytes_left - calculates the number of bytes left according to the
 * value read from CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @ctrla: the value of CTRLA
 * @desc: the descriptor containing the transfer width
 *
 * CTRLA's BTSIZE field counts transfer units, so it is scaled by the
 * descriptor's transfer width (log2 of bytes per unit) to get bytes.
 */
static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
					struct at_desc *desc)
{
	return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
}

/**
 * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
 * to the current value of CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @atchan: the channel to read CTRLA for
 * @desc: the descriptor containing the transfer width
 */
static inline int atc_calc_bytes_left_from_reg(int current_len,
		struct at_dma_chan *atchan, struct at_desc *desc)
{
	u32 ctrla = channel_readl(atchan, CTRLA);

	return atc_calc_bytes_left(current_len, ctrla, desc);
}

/**
 * atc_get_bytes_left - get the number of bytes residue for a cookie
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 *
 * Returns the residue in bytes, or -EINVAL when the cookie matches no
 * descriptor on this channel.
 */
static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc_first = atc_first_active(atchan);
	struct at_desc		*desc;
	int			ret;
	u32			ctrla, dscr;

	/*
	 * If the cookie doesn't match to the currently running transfer then
	 * we can return the total length of the associated DMA transfer,
	 * because it is still queued.
	 */
	desc = atc_get_desc_by_cookie(atchan, cookie);
	if (desc == NULL)
		return -EINVAL;
	else if (desc != desc_first)
		return desc->total_len;

	/* cookie matches to the currently running transfer */
	ret = desc_first->total_len;

	if (desc_first->lli.dscr) {
		/* hardware linked list transfer */

		/*
		 * Calculate the residue by removing the length of the child
		 * descriptors already transferred from the total length.
		 * To get the current child descriptor we can use the value of
		 * the channel's DSCR register and compare it against the value
		 * of the hardware linked list structure of each child
		 * descriptor.
		 */

		ctrla = channel_readl(atchan, CTRLA);
		rmb(); /* ensure CTRLA is read before DSCR */
		dscr = channel_readl(atchan, DSCR);

		/* for the first descriptor we can be more accurate */
		if (desc_first->lli.dscr == dscr)
			return atc_calc_bytes_left(ret, ctrla, desc_first);

		/* walk the chain, subtracting fully-transferred children */
		ret -= desc_first->len;
		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
			if (desc->lli.dscr == dscr)
				break;

			ret -= desc->len;
		}

		/*
		 * For the last descriptor in the chain we can calculate
		 * the remaining bytes using the channel's register.
		 * Note that the transfer width of the first and last
		 * descriptor may differ.
		 */
		if (!desc->lli.dscr)
			ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
	} else {
		/* single transfer */
		ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
	}

	return ret;
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want do complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct at_dma			*atdma = to_at_dma(atchan->chan_common.device);

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* If the transfer was a memset, free our temporary buffer */
	if (desc->memset) {
		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
			      desc->memset_paddr);
		desc->memset = false;
	}

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	dma_descriptor_unmap(txd);
	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	/* channel still busy: nothing to advance yet */
	if (atc_chan_is_enabled(atchan))
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			" cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	/* the descriptor stays active: only notify the client */
	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

/* deferred-work handler scheduled by at_dma_interrupt(); dispatches to
 * error handling, cyclic period notification, or normal list advance */
static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

/*
 * at_dma_interrupt - top-half IRQ handler for the controller
 *
 * Loops while masked status bits are pending, scheduling the per-channel
 * tasklet for every channel that signalled buffer-transfer completion
 * or an AHB error. Heavy lifting is deferred to atc_tasklet().
 */
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @desc: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		/* channel idle: start the hardware right away */
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		/* channel busy: park on the software queue */
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 * @chan: the channel to prepare operation on
 * @xt: Interleaved transfer template
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_interleaved(struct dma_chan *chan,
			 struct dma_interleaved_template *xt,
			 unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct data_chunk	*first = xt->sgl;
	struct at_desc		*desc = NULL;
	size_t			xfer_count;
	unsigned int		dwidth;
	u32			ctrla;
	u32			ctrlb;
	size_t			len = 0;
	int			i;

	/* only single-frame templates with at least one chunk are supported */
	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
		return NULL;

	dev_info(chan2dev(chan),
		 "%s: src=0x%08x, dest=0x%08x, numf=%d, 
frame_size=%d, flags=0x%lx\n", 6755abecfa5SMaxime Ripard __func__, xt->src_start, xt->dst_start, xt->numf, 6765abecfa5SMaxime Ripard xt->frame_size, flags); 6775abecfa5SMaxime Ripard 6785abecfa5SMaxime Ripard /* 6795abecfa5SMaxime Ripard * The controller can only "skip" X bytes every Y bytes, so we 6805abecfa5SMaxime Ripard * need to make sure we are given a template that fit that 6815abecfa5SMaxime Ripard * description, ie a template with chunks that always have the 6825abecfa5SMaxime Ripard * same size, with the same ICGs. 6835abecfa5SMaxime Ripard */ 6845abecfa5SMaxime Ripard for (i = 0; i < xt->frame_size; i++) { 6855abecfa5SMaxime Ripard struct data_chunk *chunk = xt->sgl + i; 6865abecfa5SMaxime Ripard 6875abecfa5SMaxime Ripard if ((chunk->size != xt->sgl->size) || 6885abecfa5SMaxime Ripard (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) || 6895abecfa5SMaxime Ripard (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) { 6905abecfa5SMaxime Ripard dev_err(chan2dev(chan), 6915abecfa5SMaxime Ripard "%s: the controller can transfer only identical chunks\n", 6925abecfa5SMaxime Ripard __func__); 6935abecfa5SMaxime Ripard return NULL; 6945abecfa5SMaxime Ripard } 6955abecfa5SMaxime Ripard 6965abecfa5SMaxime Ripard len += chunk->size; 6975abecfa5SMaxime Ripard } 6985abecfa5SMaxime Ripard 6995abecfa5SMaxime Ripard dwidth = atc_get_xfer_width(xt->src_start, 7005abecfa5SMaxime Ripard xt->dst_start, len); 7015abecfa5SMaxime Ripard 7025abecfa5SMaxime Ripard xfer_count = len >> dwidth; 7035abecfa5SMaxime Ripard if (xfer_count > ATC_BTSIZE_MAX) { 7045abecfa5SMaxime Ripard dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__); 7055abecfa5SMaxime Ripard return NULL; 7065abecfa5SMaxime Ripard } 7075abecfa5SMaxime Ripard 7085abecfa5SMaxime Ripard ctrla = ATC_SRC_WIDTH(dwidth) | 7095abecfa5SMaxime Ripard ATC_DST_WIDTH(dwidth); 7105abecfa5SMaxime Ripard 7115abecfa5SMaxime Ripard ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN 7125abecfa5SMaxime 
Ripard | ATC_SRC_ADDR_MODE_INCR 7135abecfa5SMaxime Ripard | ATC_DST_ADDR_MODE_INCR 7145abecfa5SMaxime Ripard | ATC_SRC_PIP 7155abecfa5SMaxime Ripard | ATC_DST_PIP 7165abecfa5SMaxime Ripard | ATC_FC_MEM2MEM; 7175abecfa5SMaxime Ripard 7185abecfa5SMaxime Ripard /* create the transfer */ 7195abecfa5SMaxime Ripard desc = atc_desc_get(atchan); 7205abecfa5SMaxime Ripard if (!desc) { 7215abecfa5SMaxime Ripard dev_err(chan2dev(chan), 7225abecfa5SMaxime Ripard "%s: couldn't allocate our descriptor\n", __func__); 7235abecfa5SMaxime Ripard return NULL; 7245abecfa5SMaxime Ripard } 7255abecfa5SMaxime Ripard 7265abecfa5SMaxime Ripard desc->lli.saddr = xt->src_start; 7275abecfa5SMaxime Ripard desc->lli.daddr = xt->dst_start; 7285abecfa5SMaxime Ripard desc->lli.ctrla = ctrla | xfer_count; 7295abecfa5SMaxime Ripard desc->lli.ctrlb = ctrlb; 7305abecfa5SMaxime Ripard 7315abecfa5SMaxime Ripard desc->boundary = first->size >> dwidth; 7325abecfa5SMaxime Ripard desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1; 7335abecfa5SMaxime Ripard desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1; 7345abecfa5SMaxime Ripard 7355abecfa5SMaxime Ripard desc->txd.cookie = -EBUSY; 7365abecfa5SMaxime Ripard desc->total_len = desc->len = len; 7375abecfa5SMaxime Ripard desc->tx_width = dwidth; 7385abecfa5SMaxime Ripard 7395abecfa5SMaxime Ripard /* set end-of-link to the last link descriptor of list*/ 7405abecfa5SMaxime Ripard set_desc_eol(desc); 7415abecfa5SMaxime Ripard 7425abecfa5SMaxime Ripard desc->txd.flags = flags; /* client is in control of this ack */ 7435abecfa5SMaxime Ripard 7445abecfa5SMaxime Ripard return &desc->txd; 7455abecfa5SMaxime Ripard } 7465abecfa5SMaxime Ripard 7475abecfa5SMaxime Ripard /** 748dc78baa2SNicolas Ferre * atc_prep_dma_memcpy - prepare a memcpy operation 749dc78baa2SNicolas Ferre * @chan: the channel to prepare operation on 750dc78baa2SNicolas Ferre * @dest: operation virtual destination address 751dc78baa2SNicolas Ferre * @src: operation 
virtual source address 752dc78baa2SNicolas Ferre * @len: operation length 753dc78baa2SNicolas Ferre * @flags: tx descriptor status flags 754dc78baa2SNicolas Ferre */ 755dc78baa2SNicolas Ferre static struct dma_async_tx_descriptor * 756dc78baa2SNicolas Ferre atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 757dc78baa2SNicolas Ferre size_t len, unsigned long flags) 758dc78baa2SNicolas Ferre { 759dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 760dc78baa2SNicolas Ferre struct at_desc *desc = NULL; 761dc78baa2SNicolas Ferre struct at_desc *first = NULL; 762dc78baa2SNicolas Ferre struct at_desc *prev = NULL; 763dc78baa2SNicolas Ferre size_t xfer_count; 764dc78baa2SNicolas Ferre size_t offset; 765dc78baa2SNicolas Ferre unsigned int src_width; 766dc78baa2SNicolas Ferre unsigned int dst_width; 767dc78baa2SNicolas Ferre u32 ctrla; 768dc78baa2SNicolas Ferre u32 ctrlb; 769dc78baa2SNicolas Ferre 770dc78baa2SNicolas Ferre dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", 771dc78baa2SNicolas Ferre dest, src, len, flags); 772dc78baa2SNicolas Ferre 773dc78baa2SNicolas Ferre if (unlikely(!len)) { 774dc78baa2SNicolas Ferre dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 775dc78baa2SNicolas Ferre return NULL; 776dc78baa2SNicolas Ferre } 777dc78baa2SNicolas Ferre 7789b3aa589SNicolas Ferre ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN 779dc78baa2SNicolas Ferre | ATC_SRC_ADDR_MODE_INCR 780dc78baa2SNicolas Ferre | ATC_DST_ADDR_MODE_INCR 781dc78baa2SNicolas Ferre | ATC_FC_MEM2MEM; 782dc78baa2SNicolas Ferre 783dc78baa2SNicolas Ferre /* 784dc78baa2SNicolas Ferre * We can be a lot more clever here, but this should take care 785dc78baa2SNicolas Ferre * of the most common optimization. 
786dc78baa2SNicolas Ferre */ 787265567fbSTorsten Fleischer src_width = dst_width = atc_get_xfer_width(src, dest, len); 788265567fbSTorsten Fleischer 789265567fbSTorsten Fleischer ctrla = ATC_SRC_WIDTH(src_width) | 790265567fbSTorsten Fleischer ATC_DST_WIDTH(dst_width); 791dc78baa2SNicolas Ferre 792dc78baa2SNicolas Ferre for (offset = 0; offset < len; offset += xfer_count << src_width) { 793dc78baa2SNicolas Ferre xfer_count = min_t(size_t, (len - offset) >> src_width, 794dc78baa2SNicolas Ferre ATC_BTSIZE_MAX); 795dc78baa2SNicolas Ferre 796dc78baa2SNicolas Ferre desc = atc_desc_get(atchan); 797dc78baa2SNicolas Ferre if (!desc) 798dc78baa2SNicolas Ferre goto err_desc_get; 799dc78baa2SNicolas Ferre 800dc78baa2SNicolas Ferre desc->lli.saddr = src + offset; 801dc78baa2SNicolas Ferre desc->lli.daddr = dest + offset; 802dc78baa2SNicolas Ferre desc->lli.ctrla = ctrla | xfer_count; 803dc78baa2SNicolas Ferre desc->lli.ctrlb = ctrlb; 804dc78baa2SNicolas Ferre 805dc78baa2SNicolas Ferre desc->txd.cookie = 0; 806bdf6c792STorsten Fleischer desc->len = xfer_count << src_width; 807dc78baa2SNicolas Ferre 808e257e156SNicolas Ferre atc_desc_chain(&first, &prev, desc); 809dc78baa2SNicolas Ferre } 810dc78baa2SNicolas Ferre 811dc78baa2SNicolas Ferre /* First descriptor of the chain embedds additional information */ 812dc78baa2SNicolas Ferre first->txd.cookie = -EBUSY; 813bdf6c792STorsten Fleischer first->total_len = len; 814bdf6c792STorsten Fleischer 815bdf6c792STorsten Fleischer /* set transfer width for the calculation of the residue */ 816d088c33bSElen Song first->tx_width = src_width; 817bdf6c792STorsten Fleischer prev->tx_width = src_width; 818dc78baa2SNicolas Ferre 819dc78baa2SNicolas Ferre /* set end-of-link to the last link descriptor of list*/ 820dc78baa2SNicolas Ferre set_desc_eol(desc); 821dc78baa2SNicolas Ferre 822568f7f0cSNicolas Ferre first->txd.flags = flags; /* client is in control of this ack */ 823dc78baa2SNicolas Ferre 824dc78baa2SNicolas Ferre return &first->txd; 
825dc78baa2SNicolas Ferre 826dc78baa2SNicolas Ferre err_desc_get: 827dc78baa2SNicolas Ferre atc_desc_put(atchan, first); 828dc78baa2SNicolas Ferre return NULL; 829dc78baa2SNicolas Ferre } 830dc78baa2SNicolas Ferre 8314d112426SMaxime Ripard /** 8324d112426SMaxime Ripard * atc_prep_dma_memset - prepare a memcpy operation 8334d112426SMaxime Ripard * @chan: the channel to prepare operation on 8344d112426SMaxime Ripard * @dest: operation virtual destination address 8354d112426SMaxime Ripard * @value: value to set memory buffer to 8364d112426SMaxime Ripard * @len: operation length 8374d112426SMaxime Ripard * @flags: tx descriptor status flags 8384d112426SMaxime Ripard */ 8394d112426SMaxime Ripard static struct dma_async_tx_descriptor * 8404d112426SMaxime Ripard atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, 8414d112426SMaxime Ripard size_t len, unsigned long flags) 8424d112426SMaxime Ripard { 8434d112426SMaxime Ripard struct at_dma_chan *atchan = to_at_dma_chan(chan); 8444d112426SMaxime Ripard struct at_dma *atdma = to_at_dma(chan->device); 8454d112426SMaxime Ripard struct at_desc *desc = NULL; 8464d112426SMaxime Ripard size_t xfer_count; 8474d112426SMaxime Ripard u32 ctrla; 8484d112426SMaxime Ripard u32 ctrlb; 8494d112426SMaxime Ripard 8504d112426SMaxime Ripard dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__, 8514d112426SMaxime Ripard dest, value, len, flags); 8524d112426SMaxime Ripard 8534d112426SMaxime Ripard if (unlikely(!len)) { 8544d112426SMaxime Ripard dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); 8554d112426SMaxime Ripard return NULL; 8564d112426SMaxime Ripard } 8574d112426SMaxime Ripard 8584d112426SMaxime Ripard if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { 8594d112426SMaxime Ripard dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n", 8604d112426SMaxime Ripard __func__); 8614d112426SMaxime Ripard return NULL; 8624d112426SMaxime Ripard } 8634d112426SMaxime Ripard 8644d112426SMaxime Ripard 
xfer_count = len >> 2; 8654d112426SMaxime Ripard if (xfer_count > ATC_BTSIZE_MAX) { 8664d112426SMaxime Ripard dev_err(chan2dev(chan), "%s: buffer is too big\n", 8674d112426SMaxime Ripard __func__); 8684d112426SMaxime Ripard return NULL; 8694d112426SMaxime Ripard } 8704d112426SMaxime Ripard 8714d112426SMaxime Ripard ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN 8724d112426SMaxime Ripard | ATC_SRC_ADDR_MODE_FIXED 8734d112426SMaxime Ripard | ATC_DST_ADDR_MODE_INCR 8744d112426SMaxime Ripard | ATC_FC_MEM2MEM; 8754d112426SMaxime Ripard 8764d112426SMaxime Ripard ctrla = ATC_SRC_WIDTH(2) | 8774d112426SMaxime Ripard ATC_DST_WIDTH(2); 8784d112426SMaxime Ripard 8794d112426SMaxime Ripard desc = atc_desc_get(atchan); 8804d112426SMaxime Ripard if (!desc) { 8814d112426SMaxime Ripard dev_err(chan2dev(chan), "%s: can't get a descriptor\n", 8824d112426SMaxime Ripard __func__); 8834d112426SMaxime Ripard return NULL; 8844d112426SMaxime Ripard } 8854d112426SMaxime Ripard 8864d112426SMaxime Ripard desc->memset_vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, 8874d112426SMaxime Ripard &desc->memset_paddr); 8884d112426SMaxime Ripard if (!desc->memset_vaddr) { 8894d112426SMaxime Ripard dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n", 8904d112426SMaxime Ripard __func__); 8914d112426SMaxime Ripard goto err_put_desc; 8924d112426SMaxime Ripard } 8934d112426SMaxime Ripard 8944d112426SMaxime Ripard *desc->memset_vaddr = value; 8954d112426SMaxime Ripard desc->memset = true; 8964d112426SMaxime Ripard 8974d112426SMaxime Ripard desc->lli.saddr = desc->memset_paddr; 8984d112426SMaxime Ripard desc->lli.daddr = dest; 8994d112426SMaxime Ripard desc->lli.ctrla = ctrla | xfer_count; 9004d112426SMaxime Ripard desc->lli.ctrlb = ctrlb; 9014d112426SMaxime Ripard 9024d112426SMaxime Ripard desc->txd.cookie = -EBUSY; 9034d112426SMaxime Ripard desc->len = len; 9044d112426SMaxime Ripard desc->total_len = len; 9054d112426SMaxime Ripard 9064d112426SMaxime Ripard /* set end-of-link on the descriptor */ 
9074d112426SMaxime Ripard set_desc_eol(desc); 9084d112426SMaxime Ripard 9094d112426SMaxime Ripard desc->txd.flags = flags; 9104d112426SMaxime Ripard 9114d112426SMaxime Ripard return &desc->txd; 9124d112426SMaxime Ripard 9134d112426SMaxime Ripard err_put_desc: 9144d112426SMaxime Ripard atc_desc_put(atchan, desc); 9154d112426SMaxime Ripard return NULL; 9164d112426SMaxime Ripard } 9174d112426SMaxime Ripard 918808347f6SNicolas Ferre 919808347f6SNicolas Ferre /** 920808347f6SNicolas Ferre * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 921808347f6SNicolas Ferre * @chan: DMA channel 922808347f6SNicolas Ferre * @sgl: scatterlist to transfer to/from 923808347f6SNicolas Ferre * @sg_len: number of entries in @scatterlist 924808347f6SNicolas Ferre * @direction: DMA direction 925808347f6SNicolas Ferre * @flags: tx descriptor status flags 926185ecb5fSAlexandre Bounine * @context: transaction context (ignored) 927808347f6SNicolas Ferre */ 928808347f6SNicolas Ferre static struct dma_async_tx_descriptor * 929808347f6SNicolas Ferre atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 930db8196dfSVinod Koul unsigned int sg_len, enum dma_transfer_direction direction, 931185ecb5fSAlexandre Bounine unsigned long flags, void *context) 932808347f6SNicolas Ferre { 933808347f6SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 934808347f6SNicolas Ferre struct at_dma_slave *atslave = chan->private; 935beeaa103SNicolas Ferre struct dma_slave_config *sconfig = &atchan->dma_sconfig; 936808347f6SNicolas Ferre struct at_desc *first = NULL; 937808347f6SNicolas Ferre struct at_desc *prev = NULL; 938808347f6SNicolas Ferre u32 ctrla; 939808347f6SNicolas Ferre u32 ctrlb; 940808347f6SNicolas Ferre dma_addr_t reg; 941808347f6SNicolas Ferre unsigned int reg_width; 942808347f6SNicolas Ferre unsigned int mem_width; 943808347f6SNicolas Ferre unsigned int i; 944808347f6SNicolas Ferre struct scatterlist *sg; 945808347f6SNicolas Ferre size_t total_len = 0; 
946808347f6SNicolas Ferre 947cc52a10aSNicolas Ferre dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", 948cc52a10aSNicolas Ferre sg_len, 949db8196dfSVinod Koul direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 950808347f6SNicolas Ferre flags); 951808347f6SNicolas Ferre 952808347f6SNicolas Ferre if (unlikely(!atslave || !sg_len)) { 953c618a9beSNicolas Ferre dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n"); 954808347f6SNicolas Ferre return NULL; 955808347f6SNicolas Ferre } 956808347f6SNicolas Ferre 9571dd1ea8eSNicolas Ferre ctrla = ATC_SCSIZE(sconfig->src_maxburst) 9581dd1ea8eSNicolas Ferre | ATC_DCSIZE(sconfig->dst_maxburst); 959ae14d4b5SNicolas Ferre ctrlb = ATC_IEN; 960808347f6SNicolas Ferre 961808347f6SNicolas Ferre switch (direction) { 962db8196dfSVinod Koul case DMA_MEM_TO_DEV: 963beeaa103SNicolas Ferre reg_width = convert_buswidth(sconfig->dst_addr_width); 964808347f6SNicolas Ferre ctrla |= ATC_DST_WIDTH(reg_width); 965808347f6SNicolas Ferre ctrlb |= ATC_DST_ADDR_MODE_FIXED 966808347f6SNicolas Ferre | ATC_SRC_ADDR_MODE_INCR 967ae14d4b5SNicolas Ferre | ATC_FC_MEM2PER 968bbe89c8eSLudovic Desroches | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); 969beeaa103SNicolas Ferre reg = sconfig->dst_addr; 970808347f6SNicolas Ferre for_each_sg(sgl, sg, sg_len, i) { 971808347f6SNicolas Ferre struct at_desc *desc; 972808347f6SNicolas Ferre u32 len; 973808347f6SNicolas Ferre u32 mem; 974808347f6SNicolas Ferre 975808347f6SNicolas Ferre desc = atc_desc_get(atchan); 976808347f6SNicolas Ferre if (!desc) 977808347f6SNicolas Ferre goto err_desc_get; 978808347f6SNicolas Ferre 9790f70e8ceSNicolas Ferre mem = sg_dma_address(sg); 980808347f6SNicolas Ferre len = sg_dma_len(sg); 981c4567976SNicolas Ferre if (unlikely(!len)) { 982c4567976SNicolas Ferre dev_dbg(chan2dev(chan), 983c4567976SNicolas Ferre "prep_slave_sg: sg(%d) data length is zero\n", i); 984c4567976SNicolas Ferre goto err; 985c4567976SNicolas Ferre } 986808347f6SNicolas Ferre mem_width = 
2; 987808347f6SNicolas Ferre if (unlikely(mem & 3 || len & 3)) 988808347f6SNicolas Ferre mem_width = 0; 989808347f6SNicolas Ferre 990808347f6SNicolas Ferre desc->lli.saddr = mem; 991808347f6SNicolas Ferre desc->lli.daddr = reg; 992808347f6SNicolas Ferre desc->lli.ctrla = ctrla 993808347f6SNicolas Ferre | ATC_SRC_WIDTH(mem_width) 994808347f6SNicolas Ferre | len >> mem_width; 995808347f6SNicolas Ferre desc->lli.ctrlb = ctrlb; 996bdf6c792STorsten Fleischer desc->len = len; 997808347f6SNicolas Ferre 998e257e156SNicolas Ferre atc_desc_chain(&first, &prev, desc); 999808347f6SNicolas Ferre total_len += len; 1000808347f6SNicolas Ferre } 1001808347f6SNicolas Ferre break; 1002db8196dfSVinod Koul case DMA_DEV_TO_MEM: 1003beeaa103SNicolas Ferre reg_width = convert_buswidth(sconfig->src_addr_width); 1004808347f6SNicolas Ferre ctrla |= ATC_SRC_WIDTH(reg_width); 1005808347f6SNicolas Ferre ctrlb |= ATC_DST_ADDR_MODE_INCR 1006808347f6SNicolas Ferre | ATC_SRC_ADDR_MODE_FIXED 1007ae14d4b5SNicolas Ferre | ATC_FC_PER2MEM 1008bbe89c8eSLudovic Desroches | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); 1009808347f6SNicolas Ferre 1010beeaa103SNicolas Ferre reg = sconfig->src_addr; 1011808347f6SNicolas Ferre for_each_sg(sgl, sg, sg_len, i) { 1012808347f6SNicolas Ferre struct at_desc *desc; 1013808347f6SNicolas Ferre u32 len; 1014808347f6SNicolas Ferre u32 mem; 1015808347f6SNicolas Ferre 1016808347f6SNicolas Ferre desc = atc_desc_get(atchan); 1017808347f6SNicolas Ferre if (!desc) 1018808347f6SNicolas Ferre goto err_desc_get; 1019808347f6SNicolas Ferre 10200f70e8ceSNicolas Ferre mem = sg_dma_address(sg); 1021808347f6SNicolas Ferre len = sg_dma_len(sg); 1022c4567976SNicolas Ferre if (unlikely(!len)) { 1023c4567976SNicolas Ferre dev_dbg(chan2dev(chan), 1024c4567976SNicolas Ferre "prep_slave_sg: sg(%d) data length is zero\n", i); 1025c4567976SNicolas Ferre goto err; 1026c4567976SNicolas Ferre } 1027808347f6SNicolas Ferre mem_width = 2; 1028808347f6SNicolas Ferre if (unlikely(mem & 3 || len & 
3)) 1029808347f6SNicolas Ferre mem_width = 0; 1030808347f6SNicolas Ferre 1031808347f6SNicolas Ferre desc->lli.saddr = reg; 1032808347f6SNicolas Ferre desc->lli.daddr = mem; 1033808347f6SNicolas Ferre desc->lli.ctrla = ctrla 1034808347f6SNicolas Ferre | ATC_DST_WIDTH(mem_width) 103559a609d9SNicolas Ferre | len >> reg_width; 1036808347f6SNicolas Ferre desc->lli.ctrlb = ctrlb; 1037bdf6c792STorsten Fleischer desc->len = len; 1038808347f6SNicolas Ferre 1039e257e156SNicolas Ferre atc_desc_chain(&first, &prev, desc); 1040808347f6SNicolas Ferre total_len += len; 1041808347f6SNicolas Ferre } 1042808347f6SNicolas Ferre break; 1043808347f6SNicolas Ferre default: 1044808347f6SNicolas Ferre return NULL; 1045808347f6SNicolas Ferre } 1046808347f6SNicolas Ferre 1047808347f6SNicolas Ferre /* set end-of-link to the last link descriptor of list*/ 1048808347f6SNicolas Ferre set_desc_eol(prev); 1049808347f6SNicolas Ferre 1050808347f6SNicolas Ferre /* First descriptor of the chain embedds additional information */ 1051808347f6SNicolas Ferre first->txd.cookie = -EBUSY; 1052bdf6c792STorsten Fleischer first->total_len = total_len; 1053bdf6c792STorsten Fleischer 1054bdf6c792STorsten Fleischer /* set transfer width for the calculation of the residue */ 1055d088c33bSElen Song first->tx_width = reg_width; 1056bdf6c792STorsten Fleischer prev->tx_width = reg_width; 1057808347f6SNicolas Ferre 1058568f7f0cSNicolas Ferre /* first link descriptor of list is responsible of flags */ 1059568f7f0cSNicolas Ferre first->txd.flags = flags; /* client is in control of this ack */ 1060808347f6SNicolas Ferre 1061808347f6SNicolas Ferre return &first->txd; 1062808347f6SNicolas Ferre 1063808347f6SNicolas Ferre err_desc_get: 1064808347f6SNicolas Ferre dev_err(chan2dev(chan), "not enough descriptors available\n"); 1065c4567976SNicolas Ferre err: 1066808347f6SNicolas Ferre atc_desc_put(atchan, first); 1067808347f6SNicolas Ferre return NULL; 1068808347f6SNicolas Ferre } 1069808347f6SNicolas Ferre 107053830cc7SNicolas 
Ferre /** 1071265567fbSTorsten Fleischer * atc_prep_dma_sg - prepare memory to memory scather-gather operation 1072265567fbSTorsten Fleischer * @chan: the channel to prepare operation on 1073265567fbSTorsten Fleischer * @dst_sg: destination scatterlist 1074265567fbSTorsten Fleischer * @dst_nents: number of destination scatterlist entries 1075265567fbSTorsten Fleischer * @src_sg: source scatterlist 1076265567fbSTorsten Fleischer * @src_nents: number of source scatterlist entries 1077265567fbSTorsten Fleischer * @flags: tx descriptor status flags 1078265567fbSTorsten Fleischer */ 1079265567fbSTorsten Fleischer static struct dma_async_tx_descriptor * 1080265567fbSTorsten Fleischer atc_prep_dma_sg(struct dma_chan *chan, 1081265567fbSTorsten Fleischer struct scatterlist *dst_sg, unsigned int dst_nents, 1082265567fbSTorsten Fleischer struct scatterlist *src_sg, unsigned int src_nents, 1083265567fbSTorsten Fleischer unsigned long flags) 1084265567fbSTorsten Fleischer { 1085265567fbSTorsten Fleischer struct at_dma_chan *atchan = to_at_dma_chan(chan); 1086265567fbSTorsten Fleischer struct at_desc *desc = NULL; 1087265567fbSTorsten Fleischer struct at_desc *first = NULL; 1088265567fbSTorsten Fleischer struct at_desc *prev = NULL; 1089265567fbSTorsten Fleischer unsigned int src_width; 1090265567fbSTorsten Fleischer unsigned int dst_width; 1091265567fbSTorsten Fleischer size_t xfer_count; 1092265567fbSTorsten Fleischer u32 ctrla; 1093265567fbSTorsten Fleischer u32 ctrlb; 1094265567fbSTorsten Fleischer size_t dst_len = 0, src_len = 0; 1095265567fbSTorsten Fleischer dma_addr_t dst = 0, src = 0; 1096265567fbSTorsten Fleischer size_t len = 0, total_len = 0; 1097265567fbSTorsten Fleischer 1098265567fbSTorsten Fleischer if (unlikely(dst_nents == 0 || src_nents == 0)) 1099265567fbSTorsten Fleischer return NULL; 1100265567fbSTorsten Fleischer 1101265567fbSTorsten Fleischer if (unlikely(dst_sg == NULL || src_sg == NULL)) 1102265567fbSTorsten Fleischer return NULL; 1103265567fbSTorsten 
Fleischer 1104265567fbSTorsten Fleischer ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN 1105265567fbSTorsten Fleischer | ATC_SRC_ADDR_MODE_INCR 1106265567fbSTorsten Fleischer | ATC_DST_ADDR_MODE_INCR 1107265567fbSTorsten Fleischer | ATC_FC_MEM2MEM; 1108265567fbSTorsten Fleischer 1109265567fbSTorsten Fleischer /* 1110265567fbSTorsten Fleischer * loop until there is either no more source or no more destination 1111265567fbSTorsten Fleischer * scatterlist entry 1112265567fbSTorsten Fleischer */ 1113265567fbSTorsten Fleischer while (true) { 1114265567fbSTorsten Fleischer 1115265567fbSTorsten Fleischer /* prepare the next transfer */ 1116265567fbSTorsten Fleischer if (dst_len == 0) { 1117265567fbSTorsten Fleischer 1118265567fbSTorsten Fleischer /* no more destination scatterlist entries */ 1119265567fbSTorsten Fleischer if (!dst_sg || !dst_nents) 1120265567fbSTorsten Fleischer break; 1121265567fbSTorsten Fleischer 1122265567fbSTorsten Fleischer dst = sg_dma_address(dst_sg); 1123265567fbSTorsten Fleischer dst_len = sg_dma_len(dst_sg); 1124265567fbSTorsten Fleischer 1125265567fbSTorsten Fleischer dst_sg = sg_next(dst_sg); 1126265567fbSTorsten Fleischer dst_nents--; 1127265567fbSTorsten Fleischer } 1128265567fbSTorsten Fleischer 1129265567fbSTorsten Fleischer if (src_len == 0) { 1130265567fbSTorsten Fleischer 1131265567fbSTorsten Fleischer /* no more source scatterlist entries */ 1132265567fbSTorsten Fleischer if (!src_sg || !src_nents) 1133265567fbSTorsten Fleischer break; 1134265567fbSTorsten Fleischer 1135265567fbSTorsten Fleischer src = sg_dma_address(src_sg); 1136265567fbSTorsten Fleischer src_len = sg_dma_len(src_sg); 1137265567fbSTorsten Fleischer 1138265567fbSTorsten Fleischer src_sg = sg_next(src_sg); 1139265567fbSTorsten Fleischer src_nents--; 1140265567fbSTorsten Fleischer } 1141265567fbSTorsten Fleischer 1142265567fbSTorsten Fleischer len = min_t(size_t, src_len, dst_len); 1143265567fbSTorsten Fleischer if (len == 0) 1144265567fbSTorsten Fleischer continue; 
1145265567fbSTorsten Fleischer 1146265567fbSTorsten Fleischer /* take care for the alignment */ 1147265567fbSTorsten Fleischer src_width = dst_width = atc_get_xfer_width(src, dst, len); 1148265567fbSTorsten Fleischer 1149265567fbSTorsten Fleischer ctrla = ATC_SRC_WIDTH(src_width) | 1150265567fbSTorsten Fleischer ATC_DST_WIDTH(dst_width); 1151265567fbSTorsten Fleischer 1152265567fbSTorsten Fleischer /* 1153265567fbSTorsten Fleischer * The number of transfers to set up refer to the source width 1154265567fbSTorsten Fleischer * that depends on the alignment. 1155265567fbSTorsten Fleischer */ 1156265567fbSTorsten Fleischer xfer_count = len >> src_width; 1157265567fbSTorsten Fleischer if (xfer_count > ATC_BTSIZE_MAX) { 1158265567fbSTorsten Fleischer xfer_count = ATC_BTSIZE_MAX; 1159265567fbSTorsten Fleischer len = ATC_BTSIZE_MAX << src_width; 1160265567fbSTorsten Fleischer } 1161265567fbSTorsten Fleischer 1162265567fbSTorsten Fleischer /* create the transfer */ 1163265567fbSTorsten Fleischer desc = atc_desc_get(atchan); 1164265567fbSTorsten Fleischer if (!desc) 1165265567fbSTorsten Fleischer goto err_desc_get; 1166265567fbSTorsten Fleischer 1167265567fbSTorsten Fleischer desc->lli.saddr = src; 1168265567fbSTorsten Fleischer desc->lli.daddr = dst; 1169265567fbSTorsten Fleischer desc->lli.ctrla = ctrla | xfer_count; 1170265567fbSTorsten Fleischer desc->lli.ctrlb = ctrlb; 1171265567fbSTorsten Fleischer 1172265567fbSTorsten Fleischer desc->txd.cookie = 0; 1173265567fbSTorsten Fleischer desc->len = len; 1174265567fbSTorsten Fleischer 1175265567fbSTorsten Fleischer /* 1176265567fbSTorsten Fleischer * Although we only need the transfer width for the first and 1177265567fbSTorsten Fleischer * the last descriptor, its easier to set it to all descriptors. 
1178265567fbSTorsten Fleischer */ 1179265567fbSTorsten Fleischer desc->tx_width = src_width; 1180265567fbSTorsten Fleischer 1181265567fbSTorsten Fleischer atc_desc_chain(&first, &prev, desc); 1182265567fbSTorsten Fleischer 1183265567fbSTorsten Fleischer /* update the lengths and addresses for the next loop cycle */ 1184265567fbSTorsten Fleischer dst_len -= len; 1185265567fbSTorsten Fleischer src_len -= len; 1186265567fbSTorsten Fleischer dst += len; 1187265567fbSTorsten Fleischer src += len; 1188265567fbSTorsten Fleischer 1189265567fbSTorsten Fleischer total_len += len; 1190265567fbSTorsten Fleischer } 1191265567fbSTorsten Fleischer 1192265567fbSTorsten Fleischer /* First descriptor of the chain embedds additional information */ 1193265567fbSTorsten Fleischer first->txd.cookie = -EBUSY; 1194265567fbSTorsten Fleischer first->total_len = total_len; 1195265567fbSTorsten Fleischer 1196265567fbSTorsten Fleischer /* set end-of-link to the last link descriptor of list*/ 1197265567fbSTorsten Fleischer set_desc_eol(desc); 1198265567fbSTorsten Fleischer 1199265567fbSTorsten Fleischer first->txd.flags = flags; /* client is in control of this ack */ 1200265567fbSTorsten Fleischer 1201265567fbSTorsten Fleischer return &first->txd; 1202265567fbSTorsten Fleischer 1203265567fbSTorsten Fleischer err_desc_get: 1204265567fbSTorsten Fleischer atc_desc_put(atchan, first); 1205265567fbSTorsten Fleischer return NULL; 1206265567fbSTorsten Fleischer } 1207265567fbSTorsten Fleischer 1208265567fbSTorsten Fleischer /** 120953830cc7SNicolas Ferre * atc_dma_cyclic_check_values 121053830cc7SNicolas Ferre * Check for too big/unaligned periods and unaligned DMA buffer 121153830cc7SNicolas Ferre */ 121253830cc7SNicolas Ferre static int 121353830cc7SNicolas Ferre atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, 12140e7264ccSAndy Shevchenko size_t period_len) 121553830cc7SNicolas Ferre { 121653830cc7SNicolas Ferre if (period_len > (ATC_BTSIZE_MAX << reg_width)) 
121753830cc7SNicolas Ferre goto err_out; 121853830cc7SNicolas Ferre if (unlikely(period_len & ((1 << reg_width) - 1))) 121953830cc7SNicolas Ferre goto err_out; 122053830cc7SNicolas Ferre if (unlikely(buf_addr & ((1 << reg_width) - 1))) 122153830cc7SNicolas Ferre goto err_out; 122253830cc7SNicolas Ferre 122353830cc7SNicolas Ferre return 0; 122453830cc7SNicolas Ferre 122553830cc7SNicolas Ferre err_out: 122653830cc7SNicolas Ferre return -EINVAL; 122753830cc7SNicolas Ferre } 122853830cc7SNicolas Ferre 122953830cc7SNicolas Ferre /** 1230d73111c6SMasanari Iida * atc_dma_cyclic_fill_desc - Fill one period descriptor 123153830cc7SNicolas Ferre */ 123253830cc7SNicolas Ferre static int 1233beeaa103SNicolas Ferre atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, 123453830cc7SNicolas Ferre unsigned int period_index, dma_addr_t buf_addr, 1235beeaa103SNicolas Ferre unsigned int reg_width, size_t period_len, 1236beeaa103SNicolas Ferre enum dma_transfer_direction direction) 123753830cc7SNicolas Ferre { 1238beeaa103SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 1239beeaa103SNicolas Ferre struct dma_slave_config *sconfig = &atchan->dma_sconfig; 124053830cc7SNicolas Ferre u32 ctrla; 124153830cc7SNicolas Ferre 124253830cc7SNicolas Ferre /* prepare common CRTLA value */ 12431dd1ea8eSNicolas Ferre ctrla = ATC_SCSIZE(sconfig->src_maxburst) 12441dd1ea8eSNicolas Ferre | ATC_DCSIZE(sconfig->dst_maxburst) 124553830cc7SNicolas Ferre | ATC_DST_WIDTH(reg_width) 124653830cc7SNicolas Ferre | ATC_SRC_WIDTH(reg_width) 124753830cc7SNicolas Ferre | period_len >> reg_width; 124853830cc7SNicolas Ferre 124953830cc7SNicolas Ferre switch (direction) { 1250db8196dfSVinod Koul case DMA_MEM_TO_DEV: 125153830cc7SNicolas Ferre desc->lli.saddr = buf_addr + (period_len * period_index); 1252beeaa103SNicolas Ferre desc->lli.daddr = sconfig->dst_addr; 125353830cc7SNicolas Ferre desc->lli.ctrla = ctrla; 1254ae14d4b5SNicolas Ferre desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED 
125553830cc7SNicolas Ferre | ATC_SRC_ADDR_MODE_INCR 1256ae14d4b5SNicolas Ferre | ATC_FC_MEM2PER 1257bbe89c8eSLudovic Desroches | ATC_SIF(atchan->mem_if) 1258bbe89c8eSLudovic Desroches | ATC_DIF(atchan->per_if); 1259bdf6c792STorsten Fleischer desc->len = period_len; 126053830cc7SNicolas Ferre break; 126153830cc7SNicolas Ferre 1262db8196dfSVinod Koul case DMA_DEV_TO_MEM: 1263beeaa103SNicolas Ferre desc->lli.saddr = sconfig->src_addr; 126453830cc7SNicolas Ferre desc->lli.daddr = buf_addr + (period_len * period_index); 126553830cc7SNicolas Ferre desc->lli.ctrla = ctrla; 1266ae14d4b5SNicolas Ferre desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR 126753830cc7SNicolas Ferre | ATC_SRC_ADDR_MODE_FIXED 1268ae14d4b5SNicolas Ferre | ATC_FC_PER2MEM 1269bbe89c8eSLudovic Desroches | ATC_SIF(atchan->per_if) 1270bbe89c8eSLudovic Desroches | ATC_DIF(atchan->mem_if); 1271bdf6c792STorsten Fleischer desc->len = period_len; 127253830cc7SNicolas Ferre break; 127353830cc7SNicolas Ferre 127453830cc7SNicolas Ferre default: 127553830cc7SNicolas Ferre return -EINVAL; 127653830cc7SNicolas Ferre } 127753830cc7SNicolas Ferre 127853830cc7SNicolas Ferre return 0; 127953830cc7SNicolas Ferre } 128053830cc7SNicolas Ferre 128153830cc7SNicolas Ferre /** 128253830cc7SNicolas Ferre * atc_prep_dma_cyclic - prepare the cyclic DMA transfer 128353830cc7SNicolas Ferre * @chan: the DMA channel to prepare 128453830cc7SNicolas Ferre * @buf_addr: physical DMA address where the buffer starts 128553830cc7SNicolas Ferre * @buf_len: total number of bytes for the entire buffer 128653830cc7SNicolas Ferre * @period_len: number of bytes for each period 128753830cc7SNicolas Ferre * @direction: transfer direction, to or from device 1288ec8b5e48SPeter Ujfalusi * @flags: tx descriptor status flags 128953830cc7SNicolas Ferre */ 129053830cc7SNicolas Ferre static struct dma_async_tx_descriptor * 129153830cc7SNicolas Ferre atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 1292185ecb5fSAlexandre Bounine 
size_t period_len, enum dma_transfer_direction direction, 129331c1e5a1SLaurent Pinchart unsigned long flags) 129453830cc7SNicolas Ferre { 129553830cc7SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 129653830cc7SNicolas Ferre struct at_dma_slave *atslave = chan->private; 1297beeaa103SNicolas Ferre struct dma_slave_config *sconfig = &atchan->dma_sconfig; 129853830cc7SNicolas Ferre struct at_desc *first = NULL; 129953830cc7SNicolas Ferre struct at_desc *prev = NULL; 130053830cc7SNicolas Ferre unsigned long was_cyclic; 1301beeaa103SNicolas Ferre unsigned int reg_width; 130253830cc7SNicolas Ferre unsigned int periods = buf_len / period_len; 130353830cc7SNicolas Ferre unsigned int i; 130453830cc7SNicolas Ferre 130553830cc7SNicolas Ferre dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 1306db8196dfSVinod Koul direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 130753830cc7SNicolas Ferre buf_addr, 130853830cc7SNicolas Ferre periods, buf_len, period_len); 130953830cc7SNicolas Ferre 131053830cc7SNicolas Ferre if (unlikely(!atslave || !buf_len || !period_len)) { 131153830cc7SNicolas Ferre dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n"); 131253830cc7SNicolas Ferre return NULL; 131353830cc7SNicolas Ferre } 131453830cc7SNicolas Ferre 131553830cc7SNicolas Ferre was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status); 131653830cc7SNicolas Ferre if (was_cyclic) { 131753830cc7SNicolas Ferre dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n"); 131853830cc7SNicolas Ferre return NULL; 131953830cc7SNicolas Ferre } 132053830cc7SNicolas Ferre 13210e7264ccSAndy Shevchenko if (unlikely(!is_slave_direction(direction))) 13220e7264ccSAndy Shevchenko goto err_out; 13230e7264ccSAndy Shevchenko 1324beeaa103SNicolas Ferre if (sconfig->direction == DMA_MEM_TO_DEV) 1325beeaa103SNicolas Ferre reg_width = convert_buswidth(sconfig->dst_addr_width); 1326beeaa103SNicolas Ferre else 1327beeaa103SNicolas Ferre reg_width = 
convert_buswidth(sconfig->src_addr_width); 1328beeaa103SNicolas Ferre 132953830cc7SNicolas Ferre /* Check for too big/unaligned periods and unaligned DMA buffer */ 13300e7264ccSAndy Shevchenko if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len)) 133153830cc7SNicolas Ferre goto err_out; 133253830cc7SNicolas Ferre 133353830cc7SNicolas Ferre /* build cyclic linked list */ 133453830cc7SNicolas Ferre for (i = 0; i < periods; i++) { 133553830cc7SNicolas Ferre struct at_desc *desc; 133653830cc7SNicolas Ferre 133753830cc7SNicolas Ferre desc = atc_desc_get(atchan); 133853830cc7SNicolas Ferre if (!desc) 133953830cc7SNicolas Ferre goto err_desc_get; 134053830cc7SNicolas Ferre 1341beeaa103SNicolas Ferre if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, 1342beeaa103SNicolas Ferre reg_width, period_len, direction)) 134353830cc7SNicolas Ferre goto err_desc_get; 134453830cc7SNicolas Ferre 134553830cc7SNicolas Ferre atc_desc_chain(&first, &prev, desc); 134653830cc7SNicolas Ferre } 134753830cc7SNicolas Ferre 134853830cc7SNicolas Ferre /* lets make a cyclic list */ 134953830cc7SNicolas Ferre prev->lli.dscr = first->txd.phys; 135053830cc7SNicolas Ferre 135153830cc7SNicolas Ferre /* First descriptor of the chain embedds additional information */ 135253830cc7SNicolas Ferre first->txd.cookie = -EBUSY; 1353bdf6c792STorsten Fleischer first->total_len = buf_len; 1354d088c33bSElen Song first->tx_width = reg_width; 135553830cc7SNicolas Ferre 135653830cc7SNicolas Ferre return &first->txd; 135753830cc7SNicolas Ferre 135853830cc7SNicolas Ferre err_desc_get: 135953830cc7SNicolas Ferre dev_err(chan2dev(chan), "not enough descriptors available\n"); 136053830cc7SNicolas Ferre atc_desc_put(atchan, first); 136153830cc7SNicolas Ferre err_out: 136253830cc7SNicolas Ferre clear_bit(ATC_IS_CYCLIC, &atchan->status); 136353830cc7SNicolas Ferre return NULL; 136453830cc7SNicolas Ferre } 136553830cc7SNicolas Ferre 13664facfe7fSMaxime Ripard static int atc_config(struct dma_chan *chan, 
1367beeaa103SNicolas Ferre struct dma_slave_config *sconfig) 1368beeaa103SNicolas Ferre { 1369beeaa103SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 1370beeaa103SNicolas Ferre 13714facfe7fSMaxime Ripard dev_vdbg(chan2dev(chan), "%s\n", __func__); 13724facfe7fSMaxime Ripard 1373beeaa103SNicolas Ferre /* Check if it is chan is configured for slave transfers */ 1374beeaa103SNicolas Ferre if (!chan->private) 1375beeaa103SNicolas Ferre return -EINVAL; 1376beeaa103SNicolas Ferre 1377beeaa103SNicolas Ferre memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig)); 1378beeaa103SNicolas Ferre 1379beeaa103SNicolas Ferre convert_burst(&atchan->dma_sconfig.src_maxburst); 1380beeaa103SNicolas Ferre convert_burst(&atchan->dma_sconfig.dst_maxburst); 1381beeaa103SNicolas Ferre 1382beeaa103SNicolas Ferre return 0; 1383beeaa103SNicolas Ferre } 1384beeaa103SNicolas Ferre 13854facfe7fSMaxime Ripard static int atc_pause(struct dma_chan *chan) 1386808347f6SNicolas Ferre { 1387808347f6SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 1388808347f6SNicolas Ferre struct at_dma *atdma = to_at_dma(chan->device); 138923b5e3adSNicolas Ferre int chan_id = atchan->chan_common.chan_id; 1390d8cb04b0SNicolas Ferre unsigned long flags; 139123b5e3adSNicolas Ferre 1392808347f6SNicolas Ferre LIST_HEAD(list); 1393808347f6SNicolas Ferre 13944facfe7fSMaxime Ripard dev_vdbg(chan2dev(chan), "%s\n", __func__); 1395c3635c78SLinus Walleij 1396d8cb04b0SNicolas Ferre spin_lock_irqsave(&atchan->lock, flags); 139723b5e3adSNicolas Ferre 139823b5e3adSNicolas Ferre dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 139923b5e3adSNicolas Ferre set_bit(ATC_IS_PAUSED, &atchan->status); 140023b5e3adSNicolas Ferre 1401d8cb04b0SNicolas Ferre spin_unlock_irqrestore(&atchan->lock, flags); 14024facfe7fSMaxime Ripard 14034facfe7fSMaxime Ripard return 0; 14044facfe7fSMaxime Ripard } 14054facfe7fSMaxime Ripard 14064facfe7fSMaxime Ripard static int atc_resume(struct dma_chan *chan) 14074facfe7fSMaxime 
Ripard { 14084facfe7fSMaxime Ripard struct at_dma_chan *atchan = to_at_dma_chan(chan); 14094facfe7fSMaxime Ripard struct at_dma *atdma = to_at_dma(chan->device); 14104facfe7fSMaxime Ripard int chan_id = atchan->chan_common.chan_id; 14114facfe7fSMaxime Ripard unsigned long flags; 14124facfe7fSMaxime Ripard 14134facfe7fSMaxime Ripard LIST_HEAD(list); 14144facfe7fSMaxime Ripard 14154facfe7fSMaxime Ripard dev_vdbg(chan2dev(chan), "%s\n", __func__); 14164facfe7fSMaxime Ripard 14173c477482SNicolas Ferre if (!atc_chan_is_paused(atchan)) 141823b5e3adSNicolas Ferre return 0; 141923b5e3adSNicolas Ferre 1420d8cb04b0SNicolas Ferre spin_lock_irqsave(&atchan->lock, flags); 142123b5e3adSNicolas Ferre 142223b5e3adSNicolas Ferre dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 142323b5e3adSNicolas Ferre clear_bit(ATC_IS_PAUSED, &atchan->status); 142423b5e3adSNicolas Ferre 1425d8cb04b0SNicolas Ferre spin_unlock_irqrestore(&atchan->lock, flags); 14264facfe7fSMaxime Ripard 14274facfe7fSMaxime Ripard return 0; 14284facfe7fSMaxime Ripard } 14294facfe7fSMaxime Ripard 14304facfe7fSMaxime Ripard static int atc_terminate_all(struct dma_chan *chan) 14314facfe7fSMaxime Ripard { 14324facfe7fSMaxime Ripard struct at_dma_chan *atchan = to_at_dma_chan(chan); 14334facfe7fSMaxime Ripard struct at_dma *atdma = to_at_dma(chan->device); 14344facfe7fSMaxime Ripard int chan_id = atchan->chan_common.chan_id; 143523b5e3adSNicolas Ferre struct at_desc *desc, *_desc; 14364facfe7fSMaxime Ripard unsigned long flags; 14374facfe7fSMaxime Ripard 14384facfe7fSMaxime Ripard LIST_HEAD(list); 14394facfe7fSMaxime Ripard 14404facfe7fSMaxime Ripard dev_vdbg(chan2dev(chan), "%s\n", __func__); 14414facfe7fSMaxime Ripard 1442808347f6SNicolas Ferre /* 1443808347f6SNicolas Ferre * This is only called when something went wrong elsewhere, so 1444808347f6SNicolas Ferre * we don't really care about the data. Just disable the 1445808347f6SNicolas Ferre * channel. 
We still have to poll the channel enable bit due 1446808347f6SNicolas Ferre * to AHB/HSB limitations. 1447808347f6SNicolas Ferre */ 1448d8cb04b0SNicolas Ferre spin_lock_irqsave(&atchan->lock, flags); 1449808347f6SNicolas Ferre 145023b5e3adSNicolas Ferre /* disabling channel: must also remove suspend state */ 145123b5e3adSNicolas Ferre dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); 1452808347f6SNicolas Ferre 1453808347f6SNicolas Ferre /* confirm that this channel is disabled */ 1454808347f6SNicolas Ferre while (dma_readl(atdma, CHSR) & atchan->mask) 1455808347f6SNicolas Ferre cpu_relax(); 1456808347f6SNicolas Ferre 1457808347f6SNicolas Ferre /* active_list entries will end up before queued entries */ 1458808347f6SNicolas Ferre list_splice_init(&atchan->queue, &list); 1459808347f6SNicolas Ferre list_splice_init(&atchan->active_list, &list); 1460808347f6SNicolas Ferre 1461808347f6SNicolas Ferre /* Flush all pending and queued descriptors */ 1462808347f6SNicolas Ferre list_for_each_entry_safe(desc, _desc, &list, desc_node) 1463808347f6SNicolas Ferre atc_chain_complete(atchan, desc); 1464c3635c78SLinus Walleij 146523b5e3adSNicolas Ferre clear_bit(ATC_IS_PAUSED, &atchan->status); 146653830cc7SNicolas Ferre /* if channel dedicated to cyclic operations, free it */ 146753830cc7SNicolas Ferre clear_bit(ATC_IS_CYCLIC, &atchan->status); 146853830cc7SNicolas Ferre 1469d8cb04b0SNicolas Ferre spin_unlock_irqrestore(&atchan->lock, flags); 1470b0ebeb9cSYong Wang 1471c3635c78SLinus Walleij return 0; 1472808347f6SNicolas Ferre } 1473808347f6SNicolas Ferre 1474dc78baa2SNicolas Ferre /** 147507934481SLinus Walleij * atc_tx_status - poll for transaction completion 1476dc78baa2SNicolas Ferre * @chan: DMA channel 1477dc78baa2SNicolas Ferre * @cookie: transaction identifier to check status of 147807934481SLinus Walleij * @txstate: if not %NULL updated with transaction state 1479dc78baa2SNicolas Ferre * 148007934481SLinus Walleij * If @txstate is passed in, upon return it 
reflect the driver 1481dc78baa2SNicolas Ferre * internal state and can be used with dma_async_is_complete() to check 1482dc78baa2SNicolas Ferre * the status of multiple cookies without re-checking hardware state. 1483dc78baa2SNicolas Ferre */ 1484dc78baa2SNicolas Ferre static enum dma_status 148507934481SLinus Walleij atc_tx_status(struct dma_chan *chan, 1486dc78baa2SNicolas Ferre dma_cookie_t cookie, 148707934481SLinus Walleij struct dma_tx_state *txstate) 1488dc78baa2SNicolas Ferre { 1489dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 1490d8cb04b0SNicolas Ferre unsigned long flags; 1491dc78baa2SNicolas Ferre enum dma_status ret; 1492d48de6f1SElen Song int bytes = 0; 1493d48de6f1SElen Song 1494d48de6f1SElen Song ret = dma_cookie_status(chan, cookie, txstate); 14956d203d1eSVinod Koul if (ret == DMA_COMPLETE) 1496d48de6f1SElen Song return ret; 1497d48de6f1SElen Song /* 1498d48de6f1SElen Song * There's no point calculating the residue if there's 1499d48de6f1SElen Song * no txstate to store the value. 
1500d48de6f1SElen Song */ 1501d48de6f1SElen Song if (!txstate) 1502d48de6f1SElen Song return DMA_ERROR; 1503dc78baa2SNicolas Ferre 1504d8cb04b0SNicolas Ferre spin_lock_irqsave(&atchan->lock, flags); 1505dc78baa2SNicolas Ferre 1506d48de6f1SElen Song /* Get number of bytes left in the active transactions */ 1507bdf6c792STorsten Fleischer bytes = atc_get_bytes_left(chan, cookie); 1508dc78baa2SNicolas Ferre 1509d8cb04b0SNicolas Ferre spin_unlock_irqrestore(&atchan->lock, flags); 1510dc78baa2SNicolas Ferre 1511d48de6f1SElen Song if (unlikely(bytes < 0)) { 1512d48de6f1SElen Song dev_vdbg(chan2dev(chan), "get residual bytes error\n"); 1513d48de6f1SElen Song return DMA_ERROR; 1514c3dbc60cSNicolas Ferre } else { 1515d48de6f1SElen Song dma_set_residue(txstate, bytes); 1516c3dbc60cSNicolas Ferre } 1517543aabc7SNicolas Ferre 1518d48de6f1SElen Song dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n", 1519d48de6f1SElen Song ret, cookie, bytes); 1520dc78baa2SNicolas Ferre 1521dc78baa2SNicolas Ferre return ret; 1522dc78baa2SNicolas Ferre } 1523dc78baa2SNicolas Ferre 1524dc78baa2SNicolas Ferre /** 1525dc78baa2SNicolas Ferre * atc_issue_pending - try to finish work 1526dc78baa2SNicolas Ferre * @chan: target DMA channel 1527dc78baa2SNicolas Ferre */ 1528dc78baa2SNicolas Ferre static void atc_issue_pending(struct dma_chan *chan) 1529dc78baa2SNicolas Ferre { 1530dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 1531d8cb04b0SNicolas Ferre unsigned long flags; 1532dc78baa2SNicolas Ferre 1533dc78baa2SNicolas Ferre dev_vdbg(chan2dev(chan), "issue_pending\n"); 1534dc78baa2SNicolas Ferre 153553830cc7SNicolas Ferre /* Not needed for cyclic transfers */ 15363c477482SNicolas Ferre if (atc_chan_is_cyclic(atchan)) 153753830cc7SNicolas Ferre return; 153853830cc7SNicolas Ferre 1539d8cb04b0SNicolas Ferre spin_lock_irqsave(&atchan->lock, flags); 1540dc78baa2SNicolas Ferre atc_advance_work(atchan); 1541d8cb04b0SNicolas Ferre spin_unlock_irqrestore(&atchan->lock, 
flags); 1542dc78baa2SNicolas Ferre } 1543dc78baa2SNicolas Ferre 1544dc78baa2SNicolas Ferre /** 1545dc78baa2SNicolas Ferre * atc_alloc_chan_resources - allocate resources for DMA channel 1546dc78baa2SNicolas Ferre * @chan: allocate descriptor resources for this channel 1547dc78baa2SNicolas Ferre * @client: current client requesting the channel be ready for requests 1548dc78baa2SNicolas Ferre * 1549dc78baa2SNicolas Ferre * return - the number of allocated descriptors 1550dc78baa2SNicolas Ferre */ 1551dc78baa2SNicolas Ferre static int atc_alloc_chan_resources(struct dma_chan *chan) 1552dc78baa2SNicolas Ferre { 1553dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 1554dc78baa2SNicolas Ferre struct at_dma *atdma = to_at_dma(chan->device); 1555dc78baa2SNicolas Ferre struct at_desc *desc; 1556808347f6SNicolas Ferre struct at_dma_slave *atslave; 1557d8cb04b0SNicolas Ferre unsigned long flags; 1558dc78baa2SNicolas Ferre int i; 1559808347f6SNicolas Ferre u32 cfg; 1560dc78baa2SNicolas Ferre LIST_HEAD(tmp_list); 1561dc78baa2SNicolas Ferre 1562dc78baa2SNicolas Ferre dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); 1563dc78baa2SNicolas Ferre 1564dc78baa2SNicolas Ferre /* ASSERT: channel is idle */ 1565dc78baa2SNicolas Ferre if (atc_chan_is_enabled(atchan)) { 1566dc78baa2SNicolas Ferre dev_dbg(chan2dev(chan), "DMA channel not idle ?\n"); 1567dc78baa2SNicolas Ferre return -EIO; 1568dc78baa2SNicolas Ferre } 1569dc78baa2SNicolas Ferre 1570808347f6SNicolas Ferre cfg = ATC_DEFAULT_CFG; 1571808347f6SNicolas Ferre 1572808347f6SNicolas Ferre atslave = chan->private; 1573808347f6SNicolas Ferre if (atslave) { 1574808347f6SNicolas Ferre /* 1575808347f6SNicolas Ferre * We need controller-specific data to set up slave 1576808347f6SNicolas Ferre * transfers. 
1577808347f6SNicolas Ferre */ 1578808347f6SNicolas Ferre BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); 1579808347f6SNicolas Ferre 1580ea7e7906SNicolas Ferre /* if cfg configuration specified take it instead of default */ 1581808347f6SNicolas Ferre if (atslave->cfg) 1582808347f6SNicolas Ferre cfg = atslave->cfg; 1583808347f6SNicolas Ferre } 1584808347f6SNicolas Ferre 1585808347f6SNicolas Ferre /* have we already been set up? 1586808347f6SNicolas Ferre * reconfigure channel but no need to reallocate descriptors */ 1587dc78baa2SNicolas Ferre if (!list_empty(&atchan->free_list)) 1588dc78baa2SNicolas Ferre return atchan->descs_allocated; 1589dc78baa2SNicolas Ferre 1590dc78baa2SNicolas Ferre /* Allocate initial pool of descriptors */ 1591dc78baa2SNicolas Ferre for (i = 0; i < init_nr_desc_per_channel; i++) { 1592dc78baa2SNicolas Ferre desc = atc_alloc_descriptor(chan, GFP_KERNEL); 1593dc78baa2SNicolas Ferre if (!desc) { 1594dc78baa2SNicolas Ferre dev_err(atdma->dma_common.dev, 1595dc78baa2SNicolas Ferre "Only %d initial descriptors\n", i); 1596dc78baa2SNicolas Ferre break; 1597dc78baa2SNicolas Ferre } 1598dc78baa2SNicolas Ferre list_add_tail(&desc->desc_node, &tmp_list); 1599dc78baa2SNicolas Ferre } 1600dc78baa2SNicolas Ferre 1601d8cb04b0SNicolas Ferre spin_lock_irqsave(&atchan->lock, flags); 1602dc78baa2SNicolas Ferre atchan->descs_allocated = i; 1603dc78baa2SNicolas Ferre list_splice(&tmp_list, &atchan->free_list); 1604d3ee98cdSRussell King - ARM Linux dma_cookie_init(chan); 1605d8cb04b0SNicolas Ferre spin_unlock_irqrestore(&atchan->lock, flags); 1606dc78baa2SNicolas Ferre 1607dc78baa2SNicolas Ferre /* channel parameters */ 1608808347f6SNicolas Ferre channel_writel(atchan, CFG, cfg); 1609dc78baa2SNicolas Ferre 1610dc78baa2SNicolas Ferre dev_dbg(chan2dev(chan), 1611dc78baa2SNicolas Ferre "alloc_chan_resources: allocated %d descriptors\n", 1612dc78baa2SNicolas Ferre atchan->descs_allocated); 1613dc78baa2SNicolas Ferre 1614dc78baa2SNicolas Ferre 
return atchan->descs_allocated; 1615dc78baa2SNicolas Ferre } 1616dc78baa2SNicolas Ferre 1617dc78baa2SNicolas Ferre /** 1618dc78baa2SNicolas Ferre * atc_free_chan_resources - free all channel resources 1619dc78baa2SNicolas Ferre * @chan: DMA channel 1620dc78baa2SNicolas Ferre */ 1621dc78baa2SNicolas Ferre static void atc_free_chan_resources(struct dma_chan *chan) 1622dc78baa2SNicolas Ferre { 1623dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 1624dc78baa2SNicolas Ferre struct at_dma *atdma = to_at_dma(chan->device); 1625dc78baa2SNicolas Ferre struct at_desc *desc, *_desc; 1626dc78baa2SNicolas Ferre LIST_HEAD(list); 1627dc78baa2SNicolas Ferre 1628dc78baa2SNicolas Ferre dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n", 1629dc78baa2SNicolas Ferre atchan->descs_allocated); 1630dc78baa2SNicolas Ferre 1631dc78baa2SNicolas Ferre /* ASSERT: channel is idle */ 1632dc78baa2SNicolas Ferre BUG_ON(!list_empty(&atchan->active_list)); 1633dc78baa2SNicolas Ferre BUG_ON(!list_empty(&atchan->queue)); 1634dc78baa2SNicolas Ferre BUG_ON(atc_chan_is_enabled(atchan)); 1635dc78baa2SNicolas Ferre 1636dc78baa2SNicolas Ferre list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 1637dc78baa2SNicolas Ferre dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); 1638dc78baa2SNicolas Ferre list_del(&desc->desc_node); 1639dc78baa2SNicolas Ferre /* free link descriptor */ 1640dc78baa2SNicolas Ferre dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); 1641dc78baa2SNicolas Ferre } 1642dc78baa2SNicolas Ferre list_splice_init(&atchan->free_list, &list); 1643dc78baa2SNicolas Ferre atchan->descs_allocated = 0; 164453830cc7SNicolas Ferre atchan->status = 0; 1645dc78baa2SNicolas Ferre 1646dc78baa2SNicolas Ferre dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1647dc78baa2SNicolas Ferre } 1648dc78baa2SNicolas Ferre 1649bbe89c8eSLudovic Desroches #ifdef CONFIG_OF 1650bbe89c8eSLudovic Desroches static bool at_dma_filter(struct 
dma_chan *chan, void *slave) 1651bbe89c8eSLudovic Desroches { 1652bbe89c8eSLudovic Desroches struct at_dma_slave *atslave = slave; 1653bbe89c8eSLudovic Desroches 1654bbe89c8eSLudovic Desroches if (atslave->dma_dev == chan->device->dev) { 1655bbe89c8eSLudovic Desroches chan->private = atslave; 1656bbe89c8eSLudovic Desroches return true; 1657bbe89c8eSLudovic Desroches } else { 1658bbe89c8eSLudovic Desroches return false; 1659bbe89c8eSLudovic Desroches } 1660bbe89c8eSLudovic Desroches } 1661bbe89c8eSLudovic Desroches 1662bbe89c8eSLudovic Desroches static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, 1663bbe89c8eSLudovic Desroches struct of_dma *of_dma) 1664bbe89c8eSLudovic Desroches { 1665bbe89c8eSLudovic Desroches struct dma_chan *chan; 1666bbe89c8eSLudovic Desroches struct at_dma_chan *atchan; 1667bbe89c8eSLudovic Desroches struct at_dma_slave *atslave; 1668bbe89c8eSLudovic Desroches dma_cap_mask_t mask; 1669bbe89c8eSLudovic Desroches unsigned int per_id; 1670bbe89c8eSLudovic Desroches struct platform_device *dmac_pdev; 1671bbe89c8eSLudovic Desroches 1672bbe89c8eSLudovic Desroches if (dma_spec->args_count != 2) 1673bbe89c8eSLudovic Desroches return NULL; 1674bbe89c8eSLudovic Desroches 1675bbe89c8eSLudovic Desroches dmac_pdev = of_find_device_by_node(dma_spec->np); 1676bbe89c8eSLudovic Desroches 1677bbe89c8eSLudovic Desroches dma_cap_zero(mask); 1678bbe89c8eSLudovic Desroches dma_cap_set(DMA_SLAVE, mask); 1679bbe89c8eSLudovic Desroches 1680bbe89c8eSLudovic Desroches atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); 1681bbe89c8eSLudovic Desroches if (!atslave) 1682bbe89c8eSLudovic Desroches return NULL; 168362971b29SLudovic Desroches 168462971b29SLudovic Desroches atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW; 1685bbe89c8eSLudovic Desroches /* 1686bbe89c8eSLudovic Desroches * We can fill both SRC_PER and DST_PER, one of these fields will be 1687bbe89c8eSLudovic Desroches * ignored depending on DMA transfer direction. 
1688bbe89c8eSLudovic Desroches */ 168962971b29SLudovic Desroches per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK; 169062971b29SLudovic Desroches atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) 16916c22770fSNicolas Ferre | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); 169262971b29SLudovic Desroches /* 169362971b29SLudovic Desroches * We have to translate the value we get from the device tree since 169462971b29SLudovic Desroches * the half FIFO configuration value had to be 0 to keep backward 169562971b29SLudovic Desroches * compatibility. 169662971b29SLudovic Desroches */ 169762971b29SLudovic Desroches switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) { 169862971b29SLudovic Desroches case AT91_DMA_CFG_FIFOCFG_ALAP: 169962971b29SLudovic Desroches atslave->cfg |= ATC_FIFOCFG_LARGESTBURST; 170062971b29SLudovic Desroches break; 170162971b29SLudovic Desroches case AT91_DMA_CFG_FIFOCFG_ASAP: 170262971b29SLudovic Desroches atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE; 170362971b29SLudovic Desroches break; 170462971b29SLudovic Desroches case AT91_DMA_CFG_FIFOCFG_HALF: 170562971b29SLudovic Desroches default: 170662971b29SLudovic Desroches atslave->cfg |= ATC_FIFOCFG_HALFFIFO; 170762971b29SLudovic Desroches } 1708bbe89c8eSLudovic Desroches atslave->dma_dev = &dmac_pdev->dev; 1709bbe89c8eSLudovic Desroches 1710bbe89c8eSLudovic Desroches chan = dma_request_channel(mask, at_dma_filter, atslave); 1711bbe89c8eSLudovic Desroches if (!chan) 1712bbe89c8eSLudovic Desroches return NULL; 1713bbe89c8eSLudovic Desroches 1714bbe89c8eSLudovic Desroches atchan = to_at_dma_chan(chan); 1715bbe89c8eSLudovic Desroches atchan->per_if = dma_spec->args[0] & 0xff; 1716bbe89c8eSLudovic Desroches atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff; 1717bbe89c8eSLudovic Desroches 1718bbe89c8eSLudovic Desroches return chan; 1719bbe89c8eSLudovic Desroches } 1720bbe89c8eSLudovic Desroches #else 1721bbe89c8eSLudovic Desroches static struct dma_chan *at_dma_xlate(struct of_phandle_args 
*dma_spec, 1722bbe89c8eSLudovic Desroches struct of_dma *of_dma) 1723bbe89c8eSLudovic Desroches { 1724bbe89c8eSLudovic Desroches return NULL; 1725bbe89c8eSLudovic Desroches } 1726bbe89c8eSLudovic Desroches #endif 1727dc78baa2SNicolas Ferre 1728dc78baa2SNicolas Ferre /*-- Module Management -----------------------------------------------*/ 1729dc78baa2SNicolas Ferre 173002f88be9SNicolas Ferre /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */ 173102f88be9SNicolas Ferre static struct at_dma_platform_data at91sam9rl_config = { 173202f88be9SNicolas Ferre .nr_channels = 2, 173302f88be9SNicolas Ferre }; 173402f88be9SNicolas Ferre static struct at_dma_platform_data at91sam9g45_config = { 173502f88be9SNicolas Ferre .nr_channels = 8, 173602f88be9SNicolas Ferre }; 173702f88be9SNicolas Ferre 1738c5115953SNicolas Ferre #if defined(CONFIG_OF) 1739c5115953SNicolas Ferre static const struct of_device_id atmel_dma_dt_ids[] = { 1740c5115953SNicolas Ferre { 1741c5115953SNicolas Ferre .compatible = "atmel,at91sam9rl-dma", 174202f88be9SNicolas Ferre .data = &at91sam9rl_config, 1743c5115953SNicolas Ferre }, { 1744c5115953SNicolas Ferre .compatible = "atmel,at91sam9g45-dma", 174502f88be9SNicolas Ferre .data = &at91sam9g45_config, 1746dcc81734SNicolas Ferre }, { 1747dcc81734SNicolas Ferre /* sentinel */ 1748dcc81734SNicolas Ferre } 1749c5115953SNicolas Ferre }; 1750c5115953SNicolas Ferre 1751c5115953SNicolas Ferre MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids); 1752c5115953SNicolas Ferre #endif 1753c5115953SNicolas Ferre 17540ab88a01SNicolas Ferre static const struct platform_device_id atdma_devtypes[] = { 175567348450SNicolas Ferre { 175667348450SNicolas Ferre .name = "at91sam9rl_dma", 175702f88be9SNicolas Ferre .driver_data = (unsigned long) &at91sam9rl_config, 175867348450SNicolas Ferre }, { 175967348450SNicolas Ferre .name = "at91sam9g45_dma", 176002f88be9SNicolas Ferre .driver_data = (unsigned long) &at91sam9g45_config, 176167348450SNicolas Ferre }, { 176267348450SNicolas 
Ferre /* sentinel */ 176367348450SNicolas Ferre } 176467348450SNicolas Ferre }; 176567348450SNicolas Ferre 17667fd63ccdSUwe Kleine-König static inline const struct at_dma_platform_data * __init at_dma_get_driver_data( 1767c5115953SNicolas Ferre struct platform_device *pdev) 1768c5115953SNicolas Ferre { 1769c5115953SNicolas Ferre if (pdev->dev.of_node) { 1770c5115953SNicolas Ferre const struct of_device_id *match; 1771c5115953SNicolas Ferre match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node); 1772c5115953SNicolas Ferre if (match == NULL) 177302f88be9SNicolas Ferre return NULL; 177402f88be9SNicolas Ferre return match->data; 1775c5115953SNicolas Ferre } 177602f88be9SNicolas Ferre return (struct at_dma_platform_data *) 177702f88be9SNicolas Ferre platform_get_device_id(pdev)->driver_data; 1778c5115953SNicolas Ferre } 1779c5115953SNicolas Ferre 1780dc78baa2SNicolas Ferre /** 1781dc78baa2SNicolas Ferre * at_dma_off - disable DMA controller 1782dc78baa2SNicolas Ferre * @atdma: the Atmel HDAMC device 1783dc78baa2SNicolas Ferre */ 1784dc78baa2SNicolas Ferre static void at_dma_off(struct at_dma *atdma) 1785dc78baa2SNicolas Ferre { 1786dc78baa2SNicolas Ferre dma_writel(atdma, EN, 0); 1787dc78baa2SNicolas Ferre 1788dc78baa2SNicolas Ferre /* disable all interrupts */ 1789dc78baa2SNicolas Ferre dma_writel(atdma, EBCIDR, -1L); 1790dc78baa2SNicolas Ferre 1791dc78baa2SNicolas Ferre /* confirm that all channels are disabled */ 1792dc78baa2SNicolas Ferre while (dma_readl(atdma, CHSR) & atdma->all_chan_mask) 1793dc78baa2SNicolas Ferre cpu_relax(); 1794dc78baa2SNicolas Ferre } 1795dc78baa2SNicolas Ferre 1796dc78baa2SNicolas Ferre static int __init at_dma_probe(struct platform_device *pdev) 1797dc78baa2SNicolas Ferre { 1798dc78baa2SNicolas Ferre struct resource *io; 1799dc78baa2SNicolas Ferre struct at_dma *atdma; 1800dc78baa2SNicolas Ferre size_t size; 1801dc78baa2SNicolas Ferre int irq; 1802dc78baa2SNicolas Ferre int err; 1803dc78baa2SNicolas Ferre int i; 18047fd63ccdSUwe 
Kleine-König const struct at_dma_platform_data *plat_dat; 1805dc78baa2SNicolas Ferre 180602f88be9SNicolas Ferre /* setup platform data for each SoC */ 180702f88be9SNicolas Ferre dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); 1808265567fbSTorsten Fleischer dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask); 18095abecfa5SMaxime Ripard dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask); 181002f88be9SNicolas Ferre dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); 18114d112426SMaxime Ripard dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask); 18124d112426SMaxime Ripard dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask); 181302f88be9SNicolas Ferre dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); 1814265567fbSTorsten Fleischer dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask); 181567348450SNicolas Ferre 181667348450SNicolas Ferre /* get DMA parameters from controller type */ 181702f88be9SNicolas Ferre plat_dat = at_dma_get_driver_data(pdev); 181802f88be9SNicolas Ferre if (!plat_dat) 181902f88be9SNicolas Ferre return -ENODEV; 1820dc78baa2SNicolas Ferre 1821dc78baa2SNicolas Ferre io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1822dc78baa2SNicolas Ferre if (!io) 1823dc78baa2SNicolas Ferre return -EINVAL; 1824dc78baa2SNicolas Ferre 1825dc78baa2SNicolas Ferre irq = platform_get_irq(pdev, 0); 1826dc78baa2SNicolas Ferre if (irq < 0) 1827dc78baa2SNicolas Ferre return irq; 1828dc78baa2SNicolas Ferre 1829dc78baa2SNicolas Ferre size = sizeof(struct at_dma); 183002f88be9SNicolas Ferre size += plat_dat->nr_channels * sizeof(struct at_dma_chan); 1831dc78baa2SNicolas Ferre atdma = kzalloc(size, GFP_KERNEL); 1832dc78baa2SNicolas Ferre if (!atdma) 1833dc78baa2SNicolas Ferre return -ENOMEM; 1834dc78baa2SNicolas Ferre 183567348450SNicolas Ferre /* discover transaction capabilities */ 183602f88be9SNicolas Ferre atdma->dma_common.cap_mask = plat_dat->cap_mask; 183702f88be9SNicolas Ferre atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1; 1838dc78baa2SNicolas 
Ferre 1839114df7d6SH Hartley Sweeten size = resource_size(io); 1840dc78baa2SNicolas Ferre if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { 1841dc78baa2SNicolas Ferre err = -EBUSY; 1842dc78baa2SNicolas Ferre goto err_kfree; 1843dc78baa2SNicolas Ferre } 1844dc78baa2SNicolas Ferre 1845dc78baa2SNicolas Ferre atdma->regs = ioremap(io->start, size); 1846dc78baa2SNicolas Ferre if (!atdma->regs) { 1847dc78baa2SNicolas Ferre err = -ENOMEM; 1848dc78baa2SNicolas Ferre goto err_release_r; 1849dc78baa2SNicolas Ferre } 1850dc78baa2SNicolas Ferre 1851dc78baa2SNicolas Ferre atdma->clk = clk_get(&pdev->dev, "dma_clk"); 1852dc78baa2SNicolas Ferre if (IS_ERR(atdma->clk)) { 1853dc78baa2SNicolas Ferre err = PTR_ERR(atdma->clk); 1854dc78baa2SNicolas Ferre goto err_clk; 1855dc78baa2SNicolas Ferre } 1856f784d9c9SBoris BREZILLON err = clk_prepare_enable(atdma->clk); 1857f784d9c9SBoris BREZILLON if (err) 1858f784d9c9SBoris BREZILLON goto err_clk_prepare; 1859dc78baa2SNicolas Ferre 1860dc78baa2SNicolas Ferre /* force dma off, just in case */ 1861dc78baa2SNicolas Ferre at_dma_off(atdma); 1862dc78baa2SNicolas Ferre 1863dc78baa2SNicolas Ferre err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma); 1864dc78baa2SNicolas Ferre if (err) 1865dc78baa2SNicolas Ferre goto err_irq; 1866dc78baa2SNicolas Ferre 1867dc78baa2SNicolas Ferre platform_set_drvdata(pdev, atdma); 1868dc78baa2SNicolas Ferre 1869dc78baa2SNicolas Ferre /* create a pool of consistent memory blocks for hardware descriptors */ 1870dc78baa2SNicolas Ferre atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", 1871dc78baa2SNicolas Ferre &pdev->dev, sizeof(struct at_desc), 1872dc78baa2SNicolas Ferre 4 /* word alignment */, 0); 1873dc78baa2SNicolas Ferre if (!atdma->dma_desc_pool) { 1874dc78baa2SNicolas Ferre dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); 1875dc78baa2SNicolas Ferre err = -ENOMEM; 18764d112426SMaxime Ripard goto err_desc_pool_create; 18774d112426SMaxime Ripard } 
18784d112426SMaxime Ripard 18794d112426SMaxime Ripard /* create a pool of consistent memory blocks for memset blocks */ 18804d112426SMaxime Ripard atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool", 18814d112426SMaxime Ripard &pdev->dev, sizeof(int), 4, 0); 18824d112426SMaxime Ripard if (!atdma->memset_pool) { 18834d112426SMaxime Ripard dev_err(&pdev->dev, "No memory for memset dma pool\n"); 18844d112426SMaxime Ripard err = -ENOMEM; 18854d112426SMaxime Ripard goto err_memset_pool_create; 1886dc78baa2SNicolas Ferre } 1887dc78baa2SNicolas Ferre 1888dc78baa2SNicolas Ferre /* clear any pending interrupt */ 1889dc78baa2SNicolas Ferre while (dma_readl(atdma, EBCISR)) 1890dc78baa2SNicolas Ferre cpu_relax(); 1891dc78baa2SNicolas Ferre 1892dc78baa2SNicolas Ferre /* initialize channels related values */ 1893dc78baa2SNicolas Ferre INIT_LIST_HEAD(&atdma->dma_common.channels); 189402f88be9SNicolas Ferre for (i = 0; i < plat_dat->nr_channels; i++) { 1895dc78baa2SNicolas Ferre struct at_dma_chan *atchan = &atdma->chan[i]; 1896dc78baa2SNicolas Ferre 1897bbe89c8eSLudovic Desroches atchan->mem_if = AT_DMA_MEM_IF; 1898bbe89c8eSLudovic Desroches atchan->per_if = AT_DMA_PER_IF; 1899dc78baa2SNicolas Ferre atchan->chan_common.device = &atdma->dma_common; 1900d3ee98cdSRussell King - ARM Linux dma_cookie_init(&atchan->chan_common); 1901dc78baa2SNicolas Ferre list_add_tail(&atchan->chan_common.device_node, 1902dc78baa2SNicolas Ferre &atdma->dma_common.channels); 1903dc78baa2SNicolas Ferre 1904dc78baa2SNicolas Ferre atchan->ch_regs = atdma->regs + ch_regs(i); 1905dc78baa2SNicolas Ferre spin_lock_init(&atchan->lock); 1906dc78baa2SNicolas Ferre atchan->mask = 1 << i; 1907dc78baa2SNicolas Ferre 1908dc78baa2SNicolas Ferre INIT_LIST_HEAD(&atchan->active_list); 1909dc78baa2SNicolas Ferre INIT_LIST_HEAD(&atchan->queue); 1910dc78baa2SNicolas Ferre INIT_LIST_HEAD(&atchan->free_list); 1911dc78baa2SNicolas Ferre 1912dc78baa2SNicolas Ferre tasklet_init(&atchan->tasklet, atc_tasklet, 
1913dc78baa2SNicolas Ferre (unsigned long)atchan); 1914bda3a47cSNikolaus Voss atc_enable_chan_irq(atdma, i); 1915dc78baa2SNicolas Ferre } 1916dc78baa2SNicolas Ferre 1917dc78baa2SNicolas Ferre /* set base routines */ 1918dc78baa2SNicolas Ferre atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; 1919dc78baa2SNicolas Ferre atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; 192007934481SLinus Walleij atdma->dma_common.device_tx_status = atc_tx_status; 1921dc78baa2SNicolas Ferre atdma->dma_common.device_issue_pending = atc_issue_pending; 1922dc78baa2SNicolas Ferre atdma->dma_common.dev = &pdev->dev; 1923dc78baa2SNicolas Ferre 1924dc78baa2SNicolas Ferre /* set prep routines based on capability */ 19255abecfa5SMaxime Ripard if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask)) 19265abecfa5SMaxime Ripard atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved; 19275abecfa5SMaxime Ripard 1928dc78baa2SNicolas Ferre if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) 1929dc78baa2SNicolas Ferre atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; 1930dc78baa2SNicolas Ferre 19314d112426SMaxime Ripard if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) { 19324d112426SMaxime Ripard atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset; 19334d112426SMaxime Ripard atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES; 19344d112426SMaxime Ripard } 19354d112426SMaxime Ripard 1936d7db8080SNicolas Ferre if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { 1937808347f6SNicolas Ferre atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1938d7db8080SNicolas Ferre /* controller can do slave DMA: can trigger cyclic transfers */ 1939d7db8080SNicolas Ferre dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); 194053830cc7SNicolas Ferre atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 19414facfe7fSMaxime Ripard atdma->dma_common.device_config = atc_config; 
19424facfe7fSMaxime Ripard atdma->dma_common.device_pause = atc_pause; 19434facfe7fSMaxime Ripard atdma->dma_common.device_resume = atc_resume; 19444facfe7fSMaxime Ripard atdma->dma_common.device_terminate_all = atc_terminate_all; 1945816070edSLudovic Desroches atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS; 1946816070edSLudovic Desroches atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS; 1947816070edSLudovic Desroches atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 1948816070edSLudovic Desroches atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1949d7db8080SNicolas Ferre } 1950808347f6SNicolas Ferre 1951265567fbSTorsten Fleischer if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)) 1952265567fbSTorsten Fleischer atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg; 1953265567fbSTorsten Fleischer 1954dc78baa2SNicolas Ferre dma_writel(atdma, EN, AT_DMA_ENABLE); 1955dc78baa2SNicolas Ferre 19564d112426SMaxime Ripard dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n", 1957dc78baa2SNicolas Ferre dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", 19584d112426SMaxime Ripard dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "", 1959dc78baa2SNicolas Ferre dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", 1960265567fbSTorsten Fleischer dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "", 196102f88be9SNicolas Ferre plat_dat->nr_channels); 1962dc78baa2SNicolas Ferre 1963dc78baa2SNicolas Ferre dma_async_device_register(&atdma->dma_common); 1964dc78baa2SNicolas Ferre 1965bbe89c8eSLudovic Desroches /* 1966bbe89c8eSLudovic Desroches * Do not return an error if the dmac node is not present in order to 1967bbe89c8eSLudovic Desroches * not break the existing way of requesting channel with 1968bbe89c8eSLudovic Desroches * dma_request_channel(). 
1969bbe89c8eSLudovic Desroches */ 1970bbe89c8eSLudovic Desroches if (pdev->dev.of_node) { 1971bbe89c8eSLudovic Desroches err = of_dma_controller_register(pdev->dev.of_node, 1972bbe89c8eSLudovic Desroches at_dma_xlate, atdma); 1973bbe89c8eSLudovic Desroches if (err) { 1974bbe89c8eSLudovic Desroches dev_err(&pdev->dev, "could not register of_dma_controller\n"); 1975bbe89c8eSLudovic Desroches goto err_of_dma_controller_register; 1976bbe89c8eSLudovic Desroches } 1977bbe89c8eSLudovic Desroches } 1978bbe89c8eSLudovic Desroches 1979dc78baa2SNicolas Ferre return 0; 1980dc78baa2SNicolas Ferre 1981bbe89c8eSLudovic Desroches err_of_dma_controller_register: 1982bbe89c8eSLudovic Desroches dma_async_device_unregister(&atdma->dma_common); 19834d112426SMaxime Ripard dma_pool_destroy(atdma->memset_pool); 19844d112426SMaxime Ripard err_memset_pool_create: 1985bbe89c8eSLudovic Desroches dma_pool_destroy(atdma->dma_desc_pool); 19864d112426SMaxime Ripard err_desc_pool_create: 1987dc78baa2SNicolas Ferre free_irq(platform_get_irq(pdev, 0), atdma); 1988dc78baa2SNicolas Ferre err_irq: 1989f784d9c9SBoris BREZILLON clk_disable_unprepare(atdma->clk); 1990f784d9c9SBoris BREZILLON err_clk_prepare: 1991dc78baa2SNicolas Ferre clk_put(atdma->clk); 1992dc78baa2SNicolas Ferre err_clk: 1993dc78baa2SNicolas Ferre iounmap(atdma->regs); 1994dc78baa2SNicolas Ferre atdma->regs = NULL; 1995dc78baa2SNicolas Ferre err_release_r: 1996dc78baa2SNicolas Ferre release_mem_region(io->start, size); 1997dc78baa2SNicolas Ferre err_kfree: 1998dc78baa2SNicolas Ferre kfree(atdma); 1999dc78baa2SNicolas Ferre return err; 2000dc78baa2SNicolas Ferre } 2001dc78baa2SNicolas Ferre 20021d1bbd30SMaxin B. 
John static int at_dma_remove(struct platform_device *pdev) 2003dc78baa2SNicolas Ferre { 2004dc78baa2SNicolas Ferre struct at_dma *atdma = platform_get_drvdata(pdev); 2005dc78baa2SNicolas Ferre struct dma_chan *chan, *_chan; 2006dc78baa2SNicolas Ferre struct resource *io; 2007dc78baa2SNicolas Ferre 2008dc78baa2SNicolas Ferre at_dma_off(atdma); 2009dc78baa2SNicolas Ferre dma_async_device_unregister(&atdma->dma_common); 2010dc78baa2SNicolas Ferre 20114d112426SMaxime Ripard dma_pool_destroy(atdma->memset_pool); 2012dc78baa2SNicolas Ferre dma_pool_destroy(atdma->dma_desc_pool); 2013dc78baa2SNicolas Ferre free_irq(platform_get_irq(pdev, 0), atdma); 2014dc78baa2SNicolas Ferre 2015dc78baa2SNicolas Ferre list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 2016dc78baa2SNicolas Ferre device_node) { 2017dc78baa2SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 2018dc78baa2SNicolas Ferre 2019dc78baa2SNicolas Ferre /* Disable interrupts */ 2020bda3a47cSNikolaus Voss atc_disable_chan_irq(atdma, chan->chan_id); 2021dc78baa2SNicolas Ferre 2022dc78baa2SNicolas Ferre tasklet_kill(&atchan->tasklet); 2023dc78baa2SNicolas Ferre list_del(&chan->device_node); 2024dc78baa2SNicolas Ferre } 2025dc78baa2SNicolas Ferre 2026f784d9c9SBoris BREZILLON clk_disable_unprepare(atdma->clk); 2027dc78baa2SNicolas Ferre clk_put(atdma->clk); 2028dc78baa2SNicolas Ferre 2029dc78baa2SNicolas Ferre iounmap(atdma->regs); 2030dc78baa2SNicolas Ferre atdma->regs = NULL; 2031dc78baa2SNicolas Ferre 2032dc78baa2SNicolas Ferre io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2033114df7d6SH Hartley Sweeten release_mem_region(io->start, resource_size(io)); 2034dc78baa2SNicolas Ferre 2035dc78baa2SNicolas Ferre kfree(atdma); 2036dc78baa2SNicolas Ferre 2037dc78baa2SNicolas Ferre return 0; 2038dc78baa2SNicolas Ferre } 2039dc78baa2SNicolas Ferre 2040dc78baa2SNicolas Ferre static void at_dma_shutdown(struct platform_device *pdev) 2041dc78baa2SNicolas Ferre { 2042dc78baa2SNicolas Ferre 
struct at_dma *atdma = platform_get_drvdata(pdev); 2043dc78baa2SNicolas Ferre 2044dc78baa2SNicolas Ferre at_dma_off(platform_get_drvdata(pdev)); 2045f784d9c9SBoris BREZILLON clk_disable_unprepare(atdma->clk); 2046dc78baa2SNicolas Ferre } 2047dc78baa2SNicolas Ferre 2048c0ba5947SNicolas Ferre static int at_dma_prepare(struct device *dev) 2049c0ba5947SNicolas Ferre { 2050c0ba5947SNicolas Ferre struct platform_device *pdev = to_platform_device(dev); 2051c0ba5947SNicolas Ferre struct at_dma *atdma = platform_get_drvdata(pdev); 2052c0ba5947SNicolas Ferre struct dma_chan *chan, *_chan; 2053c0ba5947SNicolas Ferre 2054c0ba5947SNicolas Ferre list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 2055c0ba5947SNicolas Ferre device_node) { 2056c0ba5947SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 2057c0ba5947SNicolas Ferre /* wait for transaction completion (except in cyclic case) */ 20583c477482SNicolas Ferre if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) 2059c0ba5947SNicolas Ferre return -EAGAIN; 2060c0ba5947SNicolas Ferre } 2061c0ba5947SNicolas Ferre return 0; 2062c0ba5947SNicolas Ferre } 2063c0ba5947SNicolas Ferre 2064c0ba5947SNicolas Ferre static void atc_suspend_cyclic(struct at_dma_chan *atchan) 2065c0ba5947SNicolas Ferre { 2066c0ba5947SNicolas Ferre struct dma_chan *chan = &atchan->chan_common; 2067c0ba5947SNicolas Ferre 2068c0ba5947SNicolas Ferre /* Channel should be paused by user 2069c0ba5947SNicolas Ferre * do it anyway even if it is not done already */ 20703c477482SNicolas Ferre if (!atc_chan_is_paused(atchan)) { 2071c0ba5947SNicolas Ferre dev_warn(chan2dev(chan), 2072c0ba5947SNicolas Ferre "cyclic channel not paused, should be done by channel user\n"); 20734facfe7fSMaxime Ripard atc_pause(chan); 2074c0ba5947SNicolas Ferre } 2075c0ba5947SNicolas Ferre 2076c0ba5947SNicolas Ferre /* now preserve additional data for cyclic operations */ 2077c0ba5947SNicolas Ferre /* next descriptor address in the cyclic list */ 
2078c0ba5947SNicolas Ferre atchan->save_dscr = channel_readl(atchan, DSCR); 2079c0ba5947SNicolas Ferre 2080c0ba5947SNicolas Ferre vdbg_dump_regs(atchan); 2081c0ba5947SNicolas Ferre } 2082c0ba5947SNicolas Ferre 208333f82d14SDan Williams static int at_dma_suspend_noirq(struct device *dev) 2084dc78baa2SNicolas Ferre { 208533f82d14SDan Williams struct platform_device *pdev = to_platform_device(dev); 2086dc78baa2SNicolas Ferre struct at_dma *atdma = platform_get_drvdata(pdev); 2087c0ba5947SNicolas Ferre struct dma_chan *chan, *_chan; 2088dc78baa2SNicolas Ferre 2089c0ba5947SNicolas Ferre /* preserve data */ 2090c0ba5947SNicolas Ferre list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 2091c0ba5947SNicolas Ferre device_node) { 2092c0ba5947SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 2093c0ba5947SNicolas Ferre 20943c477482SNicolas Ferre if (atc_chan_is_cyclic(atchan)) 2095c0ba5947SNicolas Ferre atc_suspend_cyclic(atchan); 2096c0ba5947SNicolas Ferre atchan->save_cfg = channel_readl(atchan, CFG); 2097c0ba5947SNicolas Ferre } 2098c0ba5947SNicolas Ferre atdma->save_imr = dma_readl(atdma, EBCIMR); 2099c0ba5947SNicolas Ferre 2100c0ba5947SNicolas Ferre /* disable DMA controller */ 2101c0ba5947SNicolas Ferre at_dma_off(atdma); 2102f784d9c9SBoris BREZILLON clk_disable_unprepare(atdma->clk); 2103dc78baa2SNicolas Ferre return 0; 2104dc78baa2SNicolas Ferre } 2105dc78baa2SNicolas Ferre 2106c0ba5947SNicolas Ferre static void atc_resume_cyclic(struct at_dma_chan *atchan) 2107c0ba5947SNicolas Ferre { 2108c0ba5947SNicolas Ferre struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 2109c0ba5947SNicolas Ferre 2110c0ba5947SNicolas Ferre /* restore channel status for cyclic descriptors list: 2111c0ba5947SNicolas Ferre * next descriptor in the cyclic list at the time of suspend */ 2112c0ba5947SNicolas Ferre channel_writel(atchan, SADDR, 0); 2113c0ba5947SNicolas Ferre channel_writel(atchan, DADDR, 0); 2114c0ba5947SNicolas Ferre channel_writel(atchan, 
CTRLA, 0); 2115c0ba5947SNicolas Ferre channel_writel(atchan, CTRLB, 0); 2116c0ba5947SNicolas Ferre channel_writel(atchan, DSCR, atchan->save_dscr); 2117c0ba5947SNicolas Ferre dma_writel(atdma, CHER, atchan->mask); 2118c0ba5947SNicolas Ferre 2119c0ba5947SNicolas Ferre /* channel pause status should be removed by channel user 2120c0ba5947SNicolas Ferre * We cannot take the initiative to do it here */ 2121c0ba5947SNicolas Ferre 2122c0ba5947SNicolas Ferre vdbg_dump_regs(atchan); 2123c0ba5947SNicolas Ferre } 2124c0ba5947SNicolas Ferre 212533f82d14SDan Williams static int at_dma_resume_noirq(struct device *dev) 2126dc78baa2SNicolas Ferre { 212733f82d14SDan Williams struct platform_device *pdev = to_platform_device(dev); 2128dc78baa2SNicolas Ferre struct at_dma *atdma = platform_get_drvdata(pdev); 2129c0ba5947SNicolas Ferre struct dma_chan *chan, *_chan; 2130dc78baa2SNicolas Ferre 2131c0ba5947SNicolas Ferre /* bring back DMA controller */ 2132f784d9c9SBoris BREZILLON clk_prepare_enable(atdma->clk); 2133dc78baa2SNicolas Ferre dma_writel(atdma, EN, AT_DMA_ENABLE); 2134c0ba5947SNicolas Ferre 2135c0ba5947SNicolas Ferre /* clear any pending interrupt */ 2136c0ba5947SNicolas Ferre while (dma_readl(atdma, EBCISR)) 2137c0ba5947SNicolas Ferre cpu_relax(); 2138c0ba5947SNicolas Ferre 2139c0ba5947SNicolas Ferre /* restore saved data */ 2140c0ba5947SNicolas Ferre dma_writel(atdma, EBCIER, atdma->save_imr); 2141c0ba5947SNicolas Ferre list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 2142c0ba5947SNicolas Ferre device_node) { 2143c0ba5947SNicolas Ferre struct at_dma_chan *atchan = to_at_dma_chan(chan); 2144c0ba5947SNicolas Ferre 2145c0ba5947SNicolas Ferre channel_writel(atchan, CFG, atchan->save_cfg); 21463c477482SNicolas Ferre if (atc_chan_is_cyclic(atchan)) 2147c0ba5947SNicolas Ferre atc_resume_cyclic(atchan); 2148c0ba5947SNicolas Ferre } 2149dc78baa2SNicolas Ferre return 0; 2150dc78baa2SNicolas Ferre } 2151dc78baa2SNicolas Ferre 215247145210SAlexey Dobriyan static 
const struct dev_pm_ops at_dma_dev_pm_ops = { 2153c0ba5947SNicolas Ferre .prepare = at_dma_prepare, 215433f82d14SDan Williams .suspend_noirq = at_dma_suspend_noirq, 215533f82d14SDan Williams .resume_noirq = at_dma_resume_noirq, 215633f82d14SDan Williams }; 215733f82d14SDan Williams 2158dc78baa2SNicolas Ferre static struct platform_driver at_dma_driver = { 21591d1bbd30SMaxin B. John .remove = at_dma_remove, 2160dc78baa2SNicolas Ferre .shutdown = at_dma_shutdown, 216167348450SNicolas Ferre .id_table = atdma_devtypes, 2162dc78baa2SNicolas Ferre .driver = { 2163dc78baa2SNicolas Ferre .name = "at_hdmac", 216433f82d14SDan Williams .pm = &at_dma_dev_pm_ops, 2165c5115953SNicolas Ferre .of_match_table = of_match_ptr(atmel_dma_dt_ids), 2166dc78baa2SNicolas Ferre }, 2167dc78baa2SNicolas Ferre }; 2168dc78baa2SNicolas Ferre 2169dc78baa2SNicolas Ferre static int __init at_dma_init(void) 2170dc78baa2SNicolas Ferre { 2171dc78baa2SNicolas Ferre return platform_driver_probe(&at_dma_driver, at_dma_probe); 2172dc78baa2SNicolas Ferre } 217393d0bec2SEric Xu subsys_initcall(at_dma_init); 2174dc78baa2SNicolas Ferre 2175dc78baa2SNicolas Ferre static void __exit at_dma_exit(void) 2176dc78baa2SNicolas Ferre { 2177dc78baa2SNicolas Ferre platform_driver_unregister(&at_dma_driver); 2178dc78baa2SNicolas Ferre } 2179dc78baa2SNicolas Ferre module_exit(at_dma_exit); 2180dc78baa2SNicolas Ferre 2181dc78baa2SNicolas Ferre MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); 2182dc78baa2SNicolas Ferre MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); 2183dc78baa2SNicolas Ferre MODULE_LICENSE("GPL"); 2184dc78baa2SNicolas Ferre MODULE_ALIAS("platform:at_hdmac"); 2185