// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/*
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};

/*
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 * @reserved: padding, makes the packed descriptor 16 bytes as consumed
 *	by the DMAC in descriptor mode
 */
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));

/*
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)

/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 *
 * The union lets a single page be carved up either into transfer
 * descriptors or into transfer chunks, never both at once.
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};
#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))

/*
 * struct rcar_dmac_chan_slave - Slave configuration
 * @slave_addr: slave memory address
 * @xfer_size: size (in bytes) of hardware transfers
 */
struct rcar_dmac_chan_slave {
	phys_addr_t slave_addr;
	unsigned int xfer_size;
};

/*
 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
 * @addr: slave dma address
 * @dir: direction of mapping
 * @slave: slave configuration that is mapped
 */
struct rcar_dmac_chan_map {
	dma_addr_t addr;
	enum dma_data_direction dir;
	struct rcar_dmac_chan_slave slave;
};

/*
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: channel IRQ
 * @src: slave memory address and size on the source side
 * @dst: slave memory address and size on the destination side
 * @map: slave dma address mapping currently held for this channel
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;
	int irq;

	struct rcar_dmac_chan_slave src;
	struct rcar_dmac_chan_slave dst;
	struct rcar_dmac_chan_map map;
	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)

/*
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @parms: storage for the DMA parameters of @dev
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;
	struct device_dma_parameters parms;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;

	DECLARE_BITMAP(modules, 256);
};

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)

/* -----------------------------------------------------------------------------
 * Registers
 */

#define RCAR_DMAC_CHAN_OFFSET(i)	(0x8000 + 0x80 * (i))

/* Global registers, relative to the controller's iomem base. */
#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

/* Per-channel registers, relative to the channel's iomem base. */
#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
/* Transfer size is split between bit 20 and bits 4:3 of CHCR. */
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4

/* -----------------------------------------------------------------------------
 * Device access
 */

/* Write a global register. DMAOR is 16-bit wide, all others are 32-bit. */
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
	else
		writel(data, dmac->iomem + reg);
}

/* Read a global register. DMAOR is 16-bit wide, all others are 32-bit. */
static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
	else
		return readl(dmac->iomem + reg);
}

/* Read a channel register. DMARS is 16-bit wide, all others are 32-bit. */
static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

/* Write a channel register. DMARS is 16-bit wide, all others are 32-bit. */
static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}

/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

/*
 * A channel is busy while a transfer is enabled (DE) or has completed but
 * hasn't been acknowledged yet (TE).
 */
static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}

/*
 * Program the channel registers for the running descriptor and kick off the
 * transfer by setting DE. Must only be called when the channel is idle.
 */
static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	u32 chcr = desc->chcr;

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

	/* A negative mid_rid denotes a MEMCPY channel with no DMARS routing. */
	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

	if (desc->hwdescs.use) {
		struct rcar_dmac_xfer_chunk *chunk =
			list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue desc %p: %u@%pad\n",
			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		/* Upper 32 bits of the 64-bit addresses go in the FIX registers. */
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
				     desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * Errata: When descriptor memory is accessed through an IOMMU
		 * the DMADAR register isn't initialized automatically from the
		 * first descriptor at beginning of transfer by the DMAC like it
		 * should. Initialize it manually with the destination address
		 * of the first chunk.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);

		/*
		 * Program the descriptor stage interrupt to occur after the end
		 * of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

		/* Reload SAR/DAR/TCR from the hardware descriptors. */
		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

		/*
		 * If the descriptor isn't cyclic enable normal descriptor mode
		 * and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback enable the
		 * descriptor stage interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupt.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
		/* Non-descriptor mode: program a single chunk at a time. */
		struct rcar_dmac_xfer_chunk *chunk = desc->running;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMATCR,
				     chunk->size >> desc->xfer_shift);

		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	/* Enable the transfer (DE) and the channel address error interrupt. */
	rcar_dmac_chan_write(chan, RCAR_DMACHCR,
			     chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
}

static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally.
	 */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	/*
	 * Read DMAOR back to verify the master enable actually took effect
	 * and that no address error (AE) is flagged.
	 */
	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

/*
 * Assign a cookie to the descriptor and queue it on the channel's pending
 * list; the transfer is only started later by issue_pending.
 */
static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	/* Execution starts from the first chunk of the transfer. */
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	/* Initialize every descriptor in the page on a local list first. */
	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free descriptors lists. The descriptor's chunks list will be reinitialized to
 * an empty list as a result.
 *
 * The descriptor must have been removed from the channel's lists before calling
 * this function.
 */
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add(&desc->node, &chan->desc.free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/*
 * Move every acked descriptor from the wait list back to the free list so it
 * can be reused for a new transfer.
 */
static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	/*
	 * We have to temporarily move all descriptors from the wait list to a
	 * local list as iterating over the wait list, even with
	 * list_for_each_entry_safe, isn't safe if we release the channel lock
	 * around the rcar_dmac_desc_put() call.
	 */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice_init(&chan->desc.wait, &list);
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			rcar_dmac_desc_put(chan, desc);
		}
	}

	if (list_empty(&list))
		return;

	/* Put the remaining descriptors back in the wait list. */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice(&list, &chan->desc.wait);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/*
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc;
	unsigned long flags;
	int ret;

	/* Recycle acked descriptors before attempting allocation. */
	rcar_dmac_desc_recycle_acked(chan);

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
60087244fe5SLaurent Pinchart */ 601d23c9a0aSKuninori Morimoto spin_unlock_irqrestore(&chan->lock, flags); 60287244fe5SLaurent Pinchart ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT); 60387244fe5SLaurent Pinchart if (ret < 0) 60487244fe5SLaurent Pinchart return NULL; 605d23c9a0aSKuninori Morimoto spin_lock_irqsave(&chan->lock, flags); 60687244fe5SLaurent Pinchart } 60787244fe5SLaurent Pinchart 608a55e07c8SLaurent Pinchart desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node); 60987244fe5SLaurent Pinchart list_del(&desc->node); 61087244fe5SLaurent Pinchart 611d23c9a0aSKuninori Morimoto spin_unlock_irqrestore(&chan->lock, flags); 61287244fe5SLaurent Pinchart 61387244fe5SLaurent Pinchart return desc; 61487244fe5SLaurent Pinchart } 61587244fe5SLaurent Pinchart 61687244fe5SLaurent Pinchart /* 61787244fe5SLaurent Pinchart * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks 61887244fe5SLaurent Pinchart * @chan: the DMA channel 61987244fe5SLaurent Pinchart * @gfp: allocation flags 62087244fe5SLaurent Pinchart */ 62187244fe5SLaurent Pinchart static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) 62287244fe5SLaurent Pinchart { 62387244fe5SLaurent Pinchart struct rcar_dmac_desc_page *page; 624d23c9a0aSKuninori Morimoto unsigned long flags; 62587244fe5SLaurent Pinchart LIST_HEAD(list); 62687244fe5SLaurent Pinchart unsigned int i; 62787244fe5SLaurent Pinchart 62887244fe5SLaurent Pinchart page = (void *)get_zeroed_page(gfp); 62987244fe5SLaurent Pinchart if (!page) 63087244fe5SLaurent Pinchart return -ENOMEM; 63187244fe5SLaurent Pinchart 63287244fe5SLaurent Pinchart for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) { 63387244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; 63487244fe5SLaurent Pinchart 63587244fe5SLaurent Pinchart list_add_tail(&chunk->node, &list); 63687244fe5SLaurent Pinchart } 63787244fe5SLaurent Pinchart 638d23c9a0aSKuninori Morimoto spin_lock_irqsave(&chan->lock, flags); 
63987244fe5SLaurent Pinchart list_splice_tail(&list, &chan->desc.chunks_free); 64087244fe5SLaurent Pinchart list_add_tail(&page->node, &chan->desc.pages); 641d23c9a0aSKuninori Morimoto spin_unlock_irqrestore(&chan->lock, flags); 64287244fe5SLaurent Pinchart 64387244fe5SLaurent Pinchart return 0; 64487244fe5SLaurent Pinchart } 64587244fe5SLaurent Pinchart 64687244fe5SLaurent Pinchart /* 64787244fe5SLaurent Pinchart * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer 64887244fe5SLaurent Pinchart * @chan: the DMA channel 64987244fe5SLaurent Pinchart * 65087244fe5SLaurent Pinchart * Locking: This function must be called in a non-atomic context. 65187244fe5SLaurent Pinchart * 65287244fe5SLaurent Pinchart * Return: A pointer to the allocated transfer chunk descriptor or NULL if no 65387244fe5SLaurent Pinchart * descriptor can be allocated. 65487244fe5SLaurent Pinchart */ 65587244fe5SLaurent Pinchart static struct rcar_dmac_xfer_chunk * 65687244fe5SLaurent Pinchart rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan) 65787244fe5SLaurent Pinchart { 65887244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk *chunk; 659d23c9a0aSKuninori Morimoto unsigned long flags; 66087244fe5SLaurent Pinchart int ret; 66187244fe5SLaurent Pinchart 662d23c9a0aSKuninori Morimoto spin_lock_irqsave(&chan->lock, flags); 66387244fe5SLaurent Pinchart 664a55e07c8SLaurent Pinchart while (list_empty(&chan->desc.chunks_free)) { 66587244fe5SLaurent Pinchart /* 666a55e07c8SLaurent Pinchart * No free descriptors, allocate a page worth of them and try 667a55e07c8SLaurent Pinchart * again, as someone else could race us to get the newly 668a55e07c8SLaurent Pinchart * allocated descriptors. If the allocation fails return an 669a55e07c8SLaurent Pinchart * error. 
67087244fe5SLaurent Pinchart */ 671d23c9a0aSKuninori Morimoto spin_unlock_irqrestore(&chan->lock, flags); 67287244fe5SLaurent Pinchart ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT); 67387244fe5SLaurent Pinchart if (ret < 0) 67487244fe5SLaurent Pinchart return NULL; 675d23c9a0aSKuninori Morimoto spin_lock_irqsave(&chan->lock, flags); 67687244fe5SLaurent Pinchart } 67787244fe5SLaurent Pinchart 67887244fe5SLaurent Pinchart chunk = list_first_entry(&chan->desc.chunks_free, 67987244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk, node); 68087244fe5SLaurent Pinchart list_del(&chunk->node); 68187244fe5SLaurent Pinchart 682d23c9a0aSKuninori Morimoto spin_unlock_irqrestore(&chan->lock, flags); 68387244fe5SLaurent Pinchart 68487244fe5SLaurent Pinchart return chunk; 68587244fe5SLaurent Pinchart } 68687244fe5SLaurent Pinchart 6871ed1315fSLaurent Pinchart static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan, 6881ed1315fSLaurent Pinchart struct rcar_dmac_desc *desc, size_t size) 6891ed1315fSLaurent Pinchart { 6901ed1315fSLaurent Pinchart /* 6911ed1315fSLaurent Pinchart * dma_alloc_coherent() allocates memory in page size increments. To 6921ed1315fSLaurent Pinchart * avoid reallocating the hardware descriptors when the allocated size 6931ed1315fSLaurent Pinchart * wouldn't change align the requested size to a multiple of the page 6941ed1315fSLaurent Pinchart * size. 
6951ed1315fSLaurent Pinchart */ 6961ed1315fSLaurent Pinchart size = PAGE_ALIGN(size); 6971ed1315fSLaurent Pinchart 6981ed1315fSLaurent Pinchart if (desc->hwdescs.size == size) 6991ed1315fSLaurent Pinchart return; 7001ed1315fSLaurent Pinchart 7011ed1315fSLaurent Pinchart if (desc->hwdescs.mem) { 7026a634808SLaurent Pinchart dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size, 7036a634808SLaurent Pinchart desc->hwdescs.mem, desc->hwdescs.dma); 7041ed1315fSLaurent Pinchart desc->hwdescs.mem = NULL; 7051ed1315fSLaurent Pinchart desc->hwdescs.size = 0; 7061ed1315fSLaurent Pinchart } 7071ed1315fSLaurent Pinchart 7081ed1315fSLaurent Pinchart if (!size) 7091ed1315fSLaurent Pinchart return; 7101ed1315fSLaurent Pinchart 7116a634808SLaurent Pinchart desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size, 7126a634808SLaurent Pinchart &desc->hwdescs.dma, GFP_NOWAIT); 7131ed1315fSLaurent Pinchart if (!desc->hwdescs.mem) 7141ed1315fSLaurent Pinchart return; 7151ed1315fSLaurent Pinchart 7161ed1315fSLaurent Pinchart desc->hwdescs.size = size; 7171ed1315fSLaurent Pinchart } 7181ed1315fSLaurent Pinchart 719ee4b876bSJürg Billeter static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, 720ccadee9bSLaurent Pinchart struct rcar_dmac_desc *desc) 721ccadee9bSLaurent Pinchart { 722ccadee9bSLaurent Pinchart struct rcar_dmac_xfer_chunk *chunk; 723ccadee9bSLaurent Pinchart struct rcar_dmac_hw_desc *hwdesc; 724ccadee9bSLaurent Pinchart 7251ed1315fSLaurent Pinchart rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc)); 7261ed1315fSLaurent Pinchart 7271ed1315fSLaurent Pinchart hwdesc = desc->hwdescs.mem; 728ccadee9bSLaurent Pinchart if (!hwdesc) 729ee4b876bSJürg Billeter return -ENOMEM; 730ccadee9bSLaurent Pinchart 731ccadee9bSLaurent Pinchart list_for_each_entry(chunk, &desc->chunks, node) { 732ccadee9bSLaurent Pinchart hwdesc->sar = chunk->src_addr; 733ccadee9bSLaurent Pinchart hwdesc->dar = chunk->dst_addr; 734ccadee9bSLaurent Pinchart hwdesc->tcr = 
chunk->size >> desc->xfer_shift; 735ccadee9bSLaurent Pinchart hwdesc++; 736ccadee9bSLaurent Pinchart } 737ee4b876bSJürg Billeter 738ee4b876bSJürg Billeter return 0; 739ccadee9bSLaurent Pinchart } 740ccadee9bSLaurent Pinchart 74187244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 74287244fe5SLaurent Pinchart * Stop and reset 74387244fe5SLaurent Pinchart */ 744a8d46a7fSKuninori Morimoto static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan) 745a8d46a7fSKuninori Morimoto { 746a8d46a7fSKuninori Morimoto u32 chcr; 747a8d46a7fSKuninori Morimoto unsigned int i; 748a8d46a7fSKuninori Morimoto 749a8d46a7fSKuninori Morimoto /* 750a8d46a7fSKuninori Morimoto * Ensure that the setting of the DE bit is actually 0 after 751a8d46a7fSKuninori Morimoto * clearing it. 752a8d46a7fSKuninori Morimoto */ 753a8d46a7fSKuninori Morimoto for (i = 0; i < 1024; i++) { 754a8d46a7fSKuninori Morimoto chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 755a8d46a7fSKuninori Morimoto if (!(chcr & RCAR_DMACHCR_DE)) 756a8d46a7fSKuninori Morimoto return; 757a8d46a7fSKuninori Morimoto udelay(1); 758a8d46a7fSKuninori Morimoto } 759a8d46a7fSKuninori Morimoto 760a8d46a7fSKuninori Morimoto dev_err(chan->chan.device->dev, "CHCR DE check error\n"); 761a8d46a7fSKuninori Morimoto } 76287244fe5SLaurent Pinchart 7634de1247aSYoshihiro Shimoda static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan) 7644de1247aSYoshihiro Shimoda { 7654de1247aSYoshihiro Shimoda u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 7664de1247aSYoshihiro Shimoda 7674de1247aSYoshihiro Shimoda /* set DE=0 and flush remaining data */ 7684de1247aSYoshihiro Shimoda rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE)); 7694de1247aSYoshihiro Shimoda 7704de1247aSYoshihiro Shimoda /* make sure all remaining data was flushed */ 7714de1247aSYoshihiro Shimoda rcar_dmac_chcr_de_barrier(chan); 7724de1247aSYoshihiro Shimoda } 7734de1247aSYoshihiro Shimoda 
77487244fe5SLaurent Pinchart static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan) 77587244fe5SLaurent Pinchart { 77687244fe5SLaurent Pinchart u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 77787244fe5SLaurent Pinchart 778ccadee9bSLaurent Pinchart chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE | 7799203dbecSKuninori Morimoto RCAR_DMACHCR_TE | RCAR_DMACHCR_DE | 7809203dbecSKuninori Morimoto RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE); 78187244fe5SLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); 782a8d46a7fSKuninori Morimoto rcar_dmac_chcr_de_barrier(chan); 78387244fe5SLaurent Pinchart } 78487244fe5SLaurent Pinchart 78587244fe5SLaurent Pinchart static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan) 78687244fe5SLaurent Pinchart { 78787244fe5SLaurent Pinchart struct rcar_dmac_desc *desc, *_desc; 78887244fe5SLaurent Pinchart unsigned long flags; 78987244fe5SLaurent Pinchart LIST_HEAD(descs); 79087244fe5SLaurent Pinchart 79187244fe5SLaurent Pinchart spin_lock_irqsave(&chan->lock, flags); 79287244fe5SLaurent Pinchart 79387244fe5SLaurent Pinchart /* Move all non-free descriptors to the local lists. 
*/ 79487244fe5SLaurent Pinchart list_splice_init(&chan->desc.pending, &descs); 79587244fe5SLaurent Pinchart list_splice_init(&chan->desc.active, &descs); 79687244fe5SLaurent Pinchart list_splice_init(&chan->desc.done, &descs); 79787244fe5SLaurent Pinchart list_splice_init(&chan->desc.wait, &descs); 79887244fe5SLaurent Pinchart 79987244fe5SLaurent Pinchart chan->desc.running = NULL; 80087244fe5SLaurent Pinchart 80187244fe5SLaurent Pinchart spin_unlock_irqrestore(&chan->lock, flags); 80287244fe5SLaurent Pinchart 80387244fe5SLaurent Pinchart list_for_each_entry_safe(desc, _desc, &descs, node) { 80487244fe5SLaurent Pinchart list_del(&desc->node); 80587244fe5SLaurent Pinchart rcar_dmac_desc_put(chan, desc); 80687244fe5SLaurent Pinchart } 80787244fe5SLaurent Pinchart } 80887244fe5SLaurent Pinchart 8099203dbecSKuninori Morimoto static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac) 81087244fe5SLaurent Pinchart { 81187244fe5SLaurent Pinchart unsigned int i; 81287244fe5SLaurent Pinchart 81387244fe5SLaurent Pinchart /* Stop all channels. */ 81487244fe5SLaurent Pinchart for (i = 0; i < dmac->n_channels; ++i) { 81587244fe5SLaurent Pinchart struct rcar_dmac_chan *chan = &dmac->channels[i]; 81687244fe5SLaurent Pinchart 81787244fe5SLaurent Pinchart /* Stop and reinitialize the channel. 
*/ 81845c9a603SGeert Uytterhoeven spin_lock_irq(&chan->lock); 81987244fe5SLaurent Pinchart rcar_dmac_chan_halt(chan); 82045c9a603SGeert Uytterhoeven spin_unlock_irq(&chan->lock); 8219203dbecSKuninori Morimoto } 8229203dbecSKuninori Morimoto } 82387244fe5SLaurent Pinchart 8248115ce74SYoshihiro Shimoda static int rcar_dmac_chan_pause(struct dma_chan *chan) 8258115ce74SYoshihiro Shimoda { 8268115ce74SYoshihiro Shimoda unsigned long flags; 8278115ce74SYoshihiro Shimoda struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 8288115ce74SYoshihiro Shimoda 8298115ce74SYoshihiro Shimoda spin_lock_irqsave(&rchan->lock, flags); 8308115ce74SYoshihiro Shimoda rcar_dmac_clear_chcr_de(rchan); 8318115ce74SYoshihiro Shimoda spin_unlock_irqrestore(&rchan->lock, flags); 8328115ce74SYoshihiro Shimoda 8338115ce74SYoshihiro Shimoda return 0; 8348115ce74SYoshihiro Shimoda } 83587244fe5SLaurent Pinchart 83687244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 83787244fe5SLaurent Pinchart * Descriptors preparation 83887244fe5SLaurent Pinchart */ 83987244fe5SLaurent Pinchart 84087244fe5SLaurent Pinchart static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan, 84187244fe5SLaurent Pinchart struct rcar_dmac_desc *desc) 84287244fe5SLaurent Pinchart { 84387244fe5SLaurent Pinchart static const u32 chcr_ts[] = { 84487244fe5SLaurent Pinchart RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B, 84587244fe5SLaurent Pinchart RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B, 84687244fe5SLaurent Pinchart RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B, 84787244fe5SLaurent Pinchart RCAR_DMACHCR_TS_64B, 84887244fe5SLaurent Pinchart }; 84987244fe5SLaurent Pinchart 85087244fe5SLaurent Pinchart unsigned int xfer_size; 85187244fe5SLaurent Pinchart u32 chcr; 85287244fe5SLaurent Pinchart 85387244fe5SLaurent Pinchart switch (desc->direction) { 85487244fe5SLaurent Pinchart case DMA_DEV_TO_MEM: 85587244fe5SLaurent Pinchart chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED 
85687244fe5SLaurent Pinchart | RCAR_DMACHCR_RS_DMARS; 857c5ed08e9SNiklas Söderlund xfer_size = chan->src.xfer_size; 85887244fe5SLaurent Pinchart break; 85987244fe5SLaurent Pinchart 86087244fe5SLaurent Pinchart case DMA_MEM_TO_DEV: 86187244fe5SLaurent Pinchart chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC 86287244fe5SLaurent Pinchart | RCAR_DMACHCR_RS_DMARS; 863c5ed08e9SNiklas Söderlund xfer_size = chan->dst.xfer_size; 86487244fe5SLaurent Pinchart break; 86587244fe5SLaurent Pinchart 86687244fe5SLaurent Pinchart case DMA_MEM_TO_MEM: 86787244fe5SLaurent Pinchart default: 86887244fe5SLaurent Pinchart chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC 86987244fe5SLaurent Pinchart | RCAR_DMACHCR_RS_AUTO; 87087244fe5SLaurent Pinchart xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE; 87187244fe5SLaurent Pinchart break; 87287244fe5SLaurent Pinchart } 87387244fe5SLaurent Pinchart 87487244fe5SLaurent Pinchart desc->xfer_shift = ilog2(xfer_size); 87587244fe5SLaurent Pinchart desc->chcr = chcr | chcr_ts[desc->xfer_shift]; 87687244fe5SLaurent Pinchart } 87787244fe5SLaurent Pinchart 87887244fe5SLaurent Pinchart /* 87987244fe5SLaurent Pinchart * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list 88087244fe5SLaurent Pinchart * 88187244fe5SLaurent Pinchart * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also 88287244fe5SLaurent Pinchart * converted to scatter-gather to guarantee consistent locking and a correct 88387244fe5SLaurent Pinchart * list manipulation. For slave DMA direction carries the usual meaning, and, 88487244fe5SLaurent Pinchart * logically, the SG list is RAM and the addr variable contains slave address, 88587244fe5SLaurent Pinchart * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM 88687244fe5SLaurent Pinchart * and the SG list contains only one element and points at the source buffer. 
88787244fe5SLaurent Pinchart */ 88887244fe5SLaurent Pinchart static struct dma_async_tx_descriptor * 88987244fe5SLaurent Pinchart rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, 89087244fe5SLaurent Pinchart unsigned int sg_len, dma_addr_t dev_addr, 89187244fe5SLaurent Pinchart enum dma_transfer_direction dir, unsigned long dma_flags, 89287244fe5SLaurent Pinchart bool cyclic) 89387244fe5SLaurent Pinchart { 89487244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk *chunk; 89587244fe5SLaurent Pinchart struct rcar_dmac_desc *desc; 89687244fe5SLaurent Pinchart struct scatterlist *sg; 897ccadee9bSLaurent Pinchart unsigned int nchunks = 0; 89887244fe5SLaurent Pinchart unsigned int max_chunk_size; 89987244fe5SLaurent Pinchart unsigned int full_size = 0; 9001175f83cSKuninori Morimoto bool cross_boundary = false; 90187244fe5SLaurent Pinchart unsigned int i; 9021175f83cSKuninori Morimoto #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 9031175f83cSKuninori Morimoto u32 high_dev_addr; 9041175f83cSKuninori Morimoto u32 high_mem_addr; 9051175f83cSKuninori Morimoto #endif 90687244fe5SLaurent Pinchart 90787244fe5SLaurent Pinchart desc = rcar_dmac_desc_get(chan); 90887244fe5SLaurent Pinchart if (!desc) 90987244fe5SLaurent Pinchart return NULL; 91087244fe5SLaurent Pinchart 91187244fe5SLaurent Pinchart desc->async_tx.flags = dma_flags; 91287244fe5SLaurent Pinchart desc->async_tx.cookie = -EBUSY; 91387244fe5SLaurent Pinchart 91487244fe5SLaurent Pinchart desc->cyclic = cyclic; 91587244fe5SLaurent Pinchart desc->direction = dir; 91687244fe5SLaurent Pinchart 91787244fe5SLaurent Pinchart rcar_dmac_chan_configure_desc(chan, desc); 91887244fe5SLaurent Pinchart 919d716d9b7SYoshihiro Shimoda max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift; 92087244fe5SLaurent Pinchart 92187244fe5SLaurent Pinchart /* 92287244fe5SLaurent Pinchart * Allocate and fill the transfer chunk descriptors. 
We own the only 92387244fe5SLaurent Pinchart * reference to the DMA descriptor, there's no need for locking. 92487244fe5SLaurent Pinchart */ 92587244fe5SLaurent Pinchart for_each_sg(sgl, sg, sg_len, i) { 92687244fe5SLaurent Pinchart dma_addr_t mem_addr = sg_dma_address(sg); 92787244fe5SLaurent Pinchart unsigned int len = sg_dma_len(sg); 92887244fe5SLaurent Pinchart 92987244fe5SLaurent Pinchart full_size += len; 93087244fe5SLaurent Pinchart 9311175f83cSKuninori Morimoto #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 9321175f83cSKuninori Morimoto if (i == 0) { 9331175f83cSKuninori Morimoto high_dev_addr = dev_addr >> 32; 9341175f83cSKuninori Morimoto high_mem_addr = mem_addr >> 32; 9351175f83cSKuninori Morimoto } 9361175f83cSKuninori Morimoto 9371175f83cSKuninori Morimoto if ((dev_addr >> 32 != high_dev_addr) || 9381175f83cSKuninori Morimoto (mem_addr >> 32 != high_mem_addr)) 9391175f83cSKuninori Morimoto cross_boundary = true; 9401175f83cSKuninori Morimoto #endif 94187244fe5SLaurent Pinchart while (len) { 94287244fe5SLaurent Pinchart unsigned int size = min(len, max_chunk_size); 94387244fe5SLaurent Pinchart 94487244fe5SLaurent Pinchart #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 94587244fe5SLaurent Pinchart /* 94687244fe5SLaurent Pinchart * Prevent individual transfers from crossing 4GB 94787244fe5SLaurent Pinchart * boundaries. 
94887244fe5SLaurent Pinchart */ 9491175f83cSKuninori Morimoto if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) { 95087244fe5SLaurent Pinchart size = ALIGN(dev_addr, 1ULL << 32) - dev_addr; 9511175f83cSKuninori Morimoto cross_boundary = true; 9521175f83cSKuninori Morimoto } 9531175f83cSKuninori Morimoto if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) { 95487244fe5SLaurent Pinchart size = ALIGN(mem_addr, 1ULL << 32) - mem_addr; 9551175f83cSKuninori Morimoto cross_boundary = true; 9561175f83cSKuninori Morimoto } 95787244fe5SLaurent Pinchart #endif 95887244fe5SLaurent Pinchart 95987244fe5SLaurent Pinchart chunk = rcar_dmac_xfer_chunk_get(chan); 96087244fe5SLaurent Pinchart if (!chunk) { 96187244fe5SLaurent Pinchart rcar_dmac_desc_put(chan, desc); 96287244fe5SLaurent Pinchart return NULL; 96387244fe5SLaurent Pinchart } 96487244fe5SLaurent Pinchart 96587244fe5SLaurent Pinchart if (dir == DMA_DEV_TO_MEM) { 96687244fe5SLaurent Pinchart chunk->src_addr = dev_addr; 96787244fe5SLaurent Pinchart chunk->dst_addr = mem_addr; 96887244fe5SLaurent Pinchart } else { 96987244fe5SLaurent Pinchart chunk->src_addr = mem_addr; 97087244fe5SLaurent Pinchart chunk->dst_addr = dev_addr; 97187244fe5SLaurent Pinchart } 97287244fe5SLaurent Pinchart 97387244fe5SLaurent Pinchart chunk->size = size; 97487244fe5SLaurent Pinchart 97587244fe5SLaurent Pinchart dev_dbg(chan->chan.device->dev, 97687244fe5SLaurent Pinchart "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n", 97787244fe5SLaurent Pinchart chan->index, chunk, desc, i, sg, size, len, 97887244fe5SLaurent Pinchart &chunk->src_addr, &chunk->dst_addr); 97987244fe5SLaurent Pinchart 98087244fe5SLaurent Pinchart mem_addr += size; 98187244fe5SLaurent Pinchart if (dir == DMA_MEM_TO_MEM) 98287244fe5SLaurent Pinchart dev_addr += size; 98387244fe5SLaurent Pinchart 98487244fe5SLaurent Pinchart len -= size; 98587244fe5SLaurent Pinchart 98687244fe5SLaurent Pinchart list_add_tail(&chunk->node, &desc->chunks); 987ccadee9bSLaurent Pinchart nchunks++; 
98887244fe5SLaurent Pinchart } 98987244fe5SLaurent Pinchart } 99087244fe5SLaurent Pinchart 991ccadee9bSLaurent Pinchart desc->nchunks = nchunks; 99287244fe5SLaurent Pinchart desc->size = full_size; 99387244fe5SLaurent Pinchart 994ccadee9bSLaurent Pinchart /* 995ccadee9bSLaurent Pinchart * Use hardware descriptor lists if possible when more than one chunk 996ccadee9bSLaurent Pinchart * needs to be transferred (otherwise they don't make much sense). 997ccadee9bSLaurent Pinchart * 9981175f83cSKuninori Morimoto * Source/Destination address should be located in same 4GiB region 9991175f83cSKuninori Morimoto * in the 40bit address space when it uses Hardware descriptor, 10001175f83cSKuninori Morimoto * and cross_boundary is checking it. 1001ccadee9bSLaurent Pinchart */ 10021175f83cSKuninori Morimoto desc->hwdescs.use = !cross_boundary && nchunks > 1; 1003ee4b876bSJürg Billeter if (desc->hwdescs.use) { 1004ee4b876bSJürg Billeter if (rcar_dmac_fill_hwdesc(chan, desc) < 0) 1005ee4b876bSJürg Billeter desc->hwdescs.use = false; 1006ee4b876bSJürg Billeter } 1007ccadee9bSLaurent Pinchart 100887244fe5SLaurent Pinchart return &desc->async_tx; 100987244fe5SLaurent Pinchart } 101087244fe5SLaurent Pinchart 101187244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 101287244fe5SLaurent Pinchart * DMA engine operations 101387244fe5SLaurent Pinchart */ 101487244fe5SLaurent Pinchart 101587244fe5SLaurent Pinchart static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan) 101687244fe5SLaurent Pinchart { 101787244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 101887244fe5SLaurent Pinchart int ret; 101987244fe5SLaurent Pinchart 102087244fe5SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.chunks_free); 102187244fe5SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.pages); 102287244fe5SLaurent Pinchart 102387244fe5SLaurent Pinchart /* Preallocate descriptors. 
*/ 102487244fe5SLaurent Pinchart ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL); 102587244fe5SLaurent Pinchart if (ret < 0) 102687244fe5SLaurent Pinchart return -ENOMEM; 102787244fe5SLaurent Pinchart 102887244fe5SLaurent Pinchart ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL); 102987244fe5SLaurent Pinchart if (ret < 0) 103087244fe5SLaurent Pinchart return -ENOMEM; 103187244fe5SLaurent Pinchart 103287244fe5SLaurent Pinchart return pm_runtime_get_sync(chan->device->dev); 103387244fe5SLaurent Pinchart } 103487244fe5SLaurent Pinchart 103587244fe5SLaurent Pinchart static void rcar_dmac_free_chan_resources(struct dma_chan *chan) 103687244fe5SLaurent Pinchart { 103787244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 103887244fe5SLaurent Pinchart struct rcar_dmac *dmac = to_rcar_dmac(chan->device); 10393139dc8dSNiklas Söderlund struct rcar_dmac_chan_map *map = &rchan->map; 104087244fe5SLaurent Pinchart struct rcar_dmac_desc_page *page, *_page; 10411ed1315fSLaurent Pinchart struct rcar_dmac_desc *desc; 10421ed1315fSLaurent Pinchart LIST_HEAD(list); 104387244fe5SLaurent Pinchart 104487244fe5SLaurent Pinchart /* Protect against ISR */ 104587244fe5SLaurent Pinchart spin_lock_irq(&rchan->lock); 104687244fe5SLaurent Pinchart rcar_dmac_chan_halt(rchan); 104787244fe5SLaurent Pinchart spin_unlock_irq(&rchan->lock); 104887244fe5SLaurent Pinchart 1049a1ed64efSNiklas Söderlund /* 1050a1ed64efSNiklas Söderlund * Now no new interrupts will occur, but one might already be 1051a1ed64efSNiklas Söderlund * running. Wait for it to finish before freeing resources. 
1052a1ed64efSNiklas Söderlund */ 1053a1ed64efSNiklas Söderlund synchronize_irq(rchan->irq); 105487244fe5SLaurent Pinchart 105587244fe5SLaurent Pinchart if (rchan->mid_rid >= 0) { 105687244fe5SLaurent Pinchart /* The caller is holding dma_list_mutex */ 105787244fe5SLaurent Pinchart clear_bit(rchan->mid_rid, dmac->modules); 105887244fe5SLaurent Pinchart rchan->mid_rid = -EINVAL; 105987244fe5SLaurent Pinchart } 106087244fe5SLaurent Pinchart 1061f7638c90SLaurent Pinchart list_splice_init(&rchan->desc.free, &list); 1062f7638c90SLaurent Pinchart list_splice_init(&rchan->desc.pending, &list); 1063f7638c90SLaurent Pinchart list_splice_init(&rchan->desc.active, &list); 1064f7638c90SLaurent Pinchart list_splice_init(&rchan->desc.done, &list); 1065f7638c90SLaurent Pinchart list_splice_init(&rchan->desc.wait, &list); 10661ed1315fSLaurent Pinchart 106748c73659SMuhammad Hamza Farooq rchan->desc.running = NULL; 106848c73659SMuhammad Hamza Farooq 10691ed1315fSLaurent Pinchart list_for_each_entry(desc, &list, node) 10701ed1315fSLaurent Pinchart rcar_dmac_realloc_hwdesc(rchan, desc, 0); 10711ed1315fSLaurent Pinchart 107287244fe5SLaurent Pinchart list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) { 107387244fe5SLaurent Pinchart list_del(&page->node); 107487244fe5SLaurent Pinchart free_page((unsigned long)page); 107587244fe5SLaurent Pinchart } 107687244fe5SLaurent Pinchart 10773139dc8dSNiklas Söderlund /* Remove slave mapping if present. 
*/ 10783139dc8dSNiklas Söderlund if (map->slave.xfer_size) { 10793139dc8dSNiklas Söderlund dma_unmap_resource(chan->device->dev, map->addr, 10803139dc8dSNiklas Söderlund map->slave.xfer_size, map->dir, 0); 10813139dc8dSNiklas Söderlund map->slave.xfer_size = 0; 10823139dc8dSNiklas Söderlund } 10833139dc8dSNiklas Söderlund 108487244fe5SLaurent Pinchart pm_runtime_put(chan->device->dev); 108587244fe5SLaurent Pinchart } 108687244fe5SLaurent Pinchart 108787244fe5SLaurent Pinchart static struct dma_async_tx_descriptor * 108887244fe5SLaurent Pinchart rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, 108987244fe5SLaurent Pinchart dma_addr_t dma_src, size_t len, unsigned long flags) 109087244fe5SLaurent Pinchart { 109187244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 109287244fe5SLaurent Pinchart struct scatterlist sgl; 109387244fe5SLaurent Pinchart 109487244fe5SLaurent Pinchart if (!len) 109587244fe5SLaurent Pinchart return NULL; 109687244fe5SLaurent Pinchart 109787244fe5SLaurent Pinchart sg_init_table(&sgl, 1); 109887244fe5SLaurent Pinchart sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len, 109987244fe5SLaurent Pinchart offset_in_page(dma_src)); 110087244fe5SLaurent Pinchart sg_dma_address(&sgl) = dma_src; 110187244fe5SLaurent Pinchart sg_dma_len(&sgl) = len; 110287244fe5SLaurent Pinchart 110387244fe5SLaurent Pinchart return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest, 110487244fe5SLaurent Pinchart DMA_MEM_TO_MEM, flags, false); 110587244fe5SLaurent Pinchart } 110687244fe5SLaurent Pinchart 11079f878603SNiklas Söderlund static int rcar_dmac_map_slave_addr(struct dma_chan *chan, 11089f878603SNiklas Söderlund enum dma_transfer_direction dir) 11099f878603SNiklas Söderlund { 11109f878603SNiklas Söderlund struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 11119f878603SNiklas Söderlund struct rcar_dmac_chan_map *map = &rchan->map; 11129f878603SNiklas Söderlund phys_addr_t dev_addr; 11139f878603SNiklas Söderlund 
size_t dev_size; 11149f878603SNiklas Söderlund enum dma_data_direction dev_dir; 11159f878603SNiklas Söderlund 11169f878603SNiklas Söderlund if (dir == DMA_DEV_TO_MEM) { 11179f878603SNiklas Söderlund dev_addr = rchan->src.slave_addr; 11189f878603SNiklas Söderlund dev_size = rchan->src.xfer_size; 11199f878603SNiklas Söderlund dev_dir = DMA_TO_DEVICE; 11209f878603SNiklas Söderlund } else { 11219f878603SNiklas Söderlund dev_addr = rchan->dst.slave_addr; 11229f878603SNiklas Söderlund dev_size = rchan->dst.xfer_size; 11239f878603SNiklas Söderlund dev_dir = DMA_FROM_DEVICE; 11249f878603SNiklas Söderlund } 11259f878603SNiklas Söderlund 11269f878603SNiklas Söderlund /* Reuse current map if possible. */ 11279f878603SNiklas Söderlund if (dev_addr == map->slave.slave_addr && 11289f878603SNiklas Söderlund dev_size == map->slave.xfer_size && 11299f878603SNiklas Söderlund dev_dir == map->dir) 11309f878603SNiklas Söderlund return 0; 11319f878603SNiklas Söderlund 11329f878603SNiklas Söderlund /* Remove old mapping if present. */ 11339f878603SNiklas Söderlund if (map->slave.xfer_size) 11349f878603SNiklas Söderlund dma_unmap_resource(chan->device->dev, map->addr, 11359f878603SNiklas Söderlund map->slave.xfer_size, map->dir, 0); 11369f878603SNiklas Söderlund map->slave.xfer_size = 0; 11379f878603SNiklas Söderlund 11389f878603SNiklas Söderlund /* Create new slave address map. 
*/ 11399f878603SNiklas Söderlund map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size, 11409f878603SNiklas Söderlund dev_dir, 0); 11419f878603SNiklas Söderlund 11429f878603SNiklas Söderlund if (dma_mapping_error(chan->device->dev, map->addr)) { 11439f878603SNiklas Söderlund dev_err(chan->device->dev, 11449f878603SNiklas Söderlund "chan%u: failed to map %zx@%pap", rchan->index, 11459f878603SNiklas Söderlund dev_size, &dev_addr); 11469f878603SNiklas Söderlund return -EIO; 11479f878603SNiklas Söderlund } 11489f878603SNiklas Söderlund 11499f878603SNiklas Söderlund dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n", 11509f878603SNiklas Söderlund rchan->index, dev_size, &dev_addr, &map->addr, 11519f878603SNiklas Söderlund dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE"); 11529f878603SNiklas Söderlund 11539f878603SNiklas Söderlund map->slave.slave_addr = dev_addr; 11549f878603SNiklas Söderlund map->slave.xfer_size = dev_size; 11559f878603SNiklas Söderlund map->dir = dev_dir; 11569f878603SNiklas Söderlund 11579f878603SNiklas Söderlund return 0; 11589f878603SNiklas Söderlund } 11599f878603SNiklas Söderlund 116087244fe5SLaurent Pinchart static struct dma_async_tx_descriptor * 116187244fe5SLaurent Pinchart rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 116287244fe5SLaurent Pinchart unsigned int sg_len, enum dma_transfer_direction dir, 116387244fe5SLaurent Pinchart unsigned long flags, void *context) 116487244fe5SLaurent Pinchart { 116587244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 116687244fe5SLaurent Pinchart 116787244fe5SLaurent Pinchart /* Someone calling slave DMA on a generic channel? 
*/ 116887244fe5SLaurent Pinchart if (rchan->mid_rid < 0 || !sg_len) { 116987244fe5SLaurent Pinchart dev_warn(chan->device->dev, 117087244fe5SLaurent Pinchart "%s: bad parameter: len=%d, id=%d\n", 117187244fe5SLaurent Pinchart __func__, sg_len, rchan->mid_rid); 117287244fe5SLaurent Pinchart return NULL; 117387244fe5SLaurent Pinchart } 117487244fe5SLaurent Pinchart 11759f878603SNiklas Söderlund if (rcar_dmac_map_slave_addr(chan, dir)) 11769f878603SNiklas Söderlund return NULL; 11779f878603SNiklas Söderlund 11789f878603SNiklas Söderlund return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, 117987244fe5SLaurent Pinchart dir, flags, false); 118087244fe5SLaurent Pinchart } 118187244fe5SLaurent Pinchart 118287244fe5SLaurent Pinchart #define RCAR_DMAC_MAX_SG_LEN 32 118387244fe5SLaurent Pinchart 118487244fe5SLaurent Pinchart static struct dma_async_tx_descriptor * 118587244fe5SLaurent Pinchart rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, 118687244fe5SLaurent Pinchart size_t buf_len, size_t period_len, 118787244fe5SLaurent Pinchart enum dma_transfer_direction dir, unsigned long flags) 118887244fe5SLaurent Pinchart { 118987244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 119087244fe5SLaurent Pinchart struct dma_async_tx_descriptor *desc; 119187244fe5SLaurent Pinchart struct scatterlist *sgl; 119287244fe5SLaurent Pinchart unsigned int sg_len; 119387244fe5SLaurent Pinchart unsigned int i; 119487244fe5SLaurent Pinchart 119587244fe5SLaurent Pinchart /* Someone calling slave DMA on a generic channel? 
*/ 119687244fe5SLaurent Pinchart if (rchan->mid_rid < 0 || buf_len < period_len) { 119787244fe5SLaurent Pinchart dev_warn(chan->device->dev, 119887244fe5SLaurent Pinchart "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n", 119987244fe5SLaurent Pinchart __func__, buf_len, period_len, rchan->mid_rid); 120087244fe5SLaurent Pinchart return NULL; 120187244fe5SLaurent Pinchart } 120287244fe5SLaurent Pinchart 12039f878603SNiklas Söderlund if (rcar_dmac_map_slave_addr(chan, dir)) 12049f878603SNiklas Söderlund return NULL; 12059f878603SNiklas Söderlund 120687244fe5SLaurent Pinchart sg_len = buf_len / period_len; 120787244fe5SLaurent Pinchart if (sg_len > RCAR_DMAC_MAX_SG_LEN) { 120887244fe5SLaurent Pinchart dev_err(chan->device->dev, 120987244fe5SLaurent Pinchart "chan%u: sg length %d exceds limit %d", 121087244fe5SLaurent Pinchart rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN); 121187244fe5SLaurent Pinchart return NULL; 121287244fe5SLaurent Pinchart } 121387244fe5SLaurent Pinchart 121487244fe5SLaurent Pinchart /* 121587244fe5SLaurent Pinchart * Allocate the sg list dynamically as it would consume too much stack 121687244fe5SLaurent Pinchart * space. 
121787244fe5SLaurent Pinchart */ 121887244fe5SLaurent Pinchart sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT); 121987244fe5SLaurent Pinchart if (!sgl) 122087244fe5SLaurent Pinchart return NULL; 122187244fe5SLaurent Pinchart 122287244fe5SLaurent Pinchart sg_init_table(sgl, sg_len); 122387244fe5SLaurent Pinchart 122487244fe5SLaurent Pinchart for (i = 0; i < sg_len; ++i) { 122587244fe5SLaurent Pinchart dma_addr_t src = buf_addr + (period_len * i); 122687244fe5SLaurent Pinchart 122787244fe5SLaurent Pinchart sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len, 122887244fe5SLaurent Pinchart offset_in_page(src)); 122987244fe5SLaurent Pinchart sg_dma_address(&sgl[i]) = src; 123087244fe5SLaurent Pinchart sg_dma_len(&sgl[i]) = period_len; 123187244fe5SLaurent Pinchart } 123287244fe5SLaurent Pinchart 12339f878603SNiklas Söderlund desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, 123487244fe5SLaurent Pinchart dir, flags, true); 123587244fe5SLaurent Pinchart 123687244fe5SLaurent Pinchart kfree(sgl); 123787244fe5SLaurent Pinchart return desc; 123887244fe5SLaurent Pinchart } 123987244fe5SLaurent Pinchart 124087244fe5SLaurent Pinchart static int rcar_dmac_device_config(struct dma_chan *chan, 124187244fe5SLaurent Pinchart struct dma_slave_config *cfg) 124287244fe5SLaurent Pinchart { 124387244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 124487244fe5SLaurent Pinchart 124587244fe5SLaurent Pinchart /* 124687244fe5SLaurent Pinchart * We could lock this, but you shouldn't be configuring the 124787244fe5SLaurent Pinchart * channel, while using it... 
 */
	rchan->src.slave_addr = cfg->src_addr;
	rchan->dst.slave_addr = cfg->dst_addr;
	rchan->src.xfer_size = cfg->src_addr_width;
	rchan->dst.xfer_size = cfg->dst_addr_width;

	return 0;
}

/*
 * Stop the channel and drop everything it is processing: the hardware is
 * halted under the channel lock, then the channel is reinitialized.
 */
static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/*
	 * FIXME: No new interrupt can occur now, but the IRQ thread might still
	 * be running.
	 */

	rcar_dmac_chan_reinit(rchan);

	return 0;
}

/*
 * Compute the number of bytes still to be transferred for @cookie.
 * Called with the channel lock held (see rcar_dmac_tx_status()).
 * Returns 0 when the descriptor has already completed.
 */
static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
					       dma_cookie_t cookie)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	struct rcar_dmac_xfer_chunk *running = NULL;
	struct rcar_dmac_xfer_chunk *chunk;
	enum dma_status status;
	unsigned int residue = 0;
	unsigned int dptr = 0;
	unsigned int chcrb;
	unsigned int tcrb;
	unsigned int i;

	/* No running descriptor means nothing in flight for this channel. */
	if (!desc)
		return 0;

	/*
	 * If the cookie corresponds to a descriptor that has been completed
	 * there is no residue. The same check has already been performed by the
	 * caller but without holding the channel lock, so the descriptor could
	 * now be complete.
 */
	status = dma_cookie_status(&chan->chan, cookie, NULL);
	if (status == DMA_COMPLETE)
		return 0;

	/*
	 * If the cookie doesn't correspond to the currently running transfer
	 * then the descriptor hasn't been processed yet, and the residue is
	 * equal to the full descriptor size.
	 * Also, a client driver is possible to call this function before
	 * rcar_dmac_isr_channel_thread() runs. In this case, the "desc.running"
	 * will be the next descriptor, and the done list will appear. So, if
	 * the argument cookie matches the done list's cookie, we can assume
	 * the residue is zero.
	 */
	if (cookie != desc->async_tx.cookie) {
		/* Completed but not yet handled by the IRQ thread: no residue. */
		list_for_each_entry(desc, &chan->desc.done, node) {
			if (cookie == desc->async_tx.cookie)
				return 0;
		}
		/* Not started yet: the whole descriptor size remains. */
		list_for_each_entry(desc, &chan->desc.pending, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}
		list_for_each_entry(desc, &chan->desc.active, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}

		/*
		 * No descriptor found for the cookie, there's thus no residue.
		 * This shouldn't happen if the calling driver passes a correct
		 * cookie value.
		 */
		WARN(1, "No descriptor for cookie!");
		return 0;
	}

	/*
	 * We need to read two registers.
	 * Make sure the control register does not skip to next chunk
	 * while reading the counter.
	 * Trying it 3 times should be enough: Initial read, retry, retry
	 * for the paranoid.
	 */
	for (i = 0; i < 3; i++) {
		chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
					    RCAR_DMACHCRB_DPTR_MASK;
		tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
		/* Still the same? */
		if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
			      RCAR_DMACHCRB_DPTR_MASK))
			break;
	}
	WARN_ONCE(i >= 3, "residue might be not continuous!");

	/*
	 * In descriptor mode the descriptor running pointer is not maintained
	 * by the interrupt handler, find the running descriptor from the
	 * descriptor pointer field in the CHCRB register. In non-descriptor
	 * mode just use the running descriptor pointer.
	 */
	if (desc->hwdescs.use) {
		/* DPTR holds the next stage; step back (with wrap) to the current one. */
		dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
		if (dptr == 0)
			dptr = desc->nchunks;
		dptr--;
		WARN_ON(dptr >= desc->nchunks);
	} else {
		running = desc->running;
	}

	/* Compute the size of all chunks still to be transferred. */
	list_for_each_entry_reverse(chunk, &desc->chunks, node) {
		if (chunk == running || ++dptr == desc->nchunks)
			break;

		residue += chunk->size;
	}

	/* Add the residue for the current chunk. */
	residue += tcrb << desc->xfer_shift;

	return residue;
}

/*
 * dmaengine .device_tx_status operation: report the completion status of
 * @cookie and, for in-flight descriptors, its residue in bytes.
 */
static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	enum dma_status status;
	unsigned long flags;
	unsigned int residue;
	bool cyclic;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	/* The residue computation requires the channel lock. */
	spin_lock_irqsave(&rchan->lock, flags);
	residue = rcar_dmac_chan_get_residue(rchan, cookie);
	cyclic = rchan->desc.running ?
		 rchan->desc.running->cyclic : false;
	spin_unlock_irqrestore(&rchan->lock, flags);

	/* if there's no residue, the cookie is complete */
	if (!residue && !cyclic)
		return DMA_COMPLETE;

	dma_set_residue(txstate, residue);

	return status;
}

/*
 * dmaengine .device_issue_pending operation: move all pending descriptors to
 * the active list and, if no transfer is currently running, start one.
 */
static void rcar_dmac_issue_pending(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);

	if (list_empty(&rchan->desc.pending))
		goto done;

	/* Append the pending list to the active list. */
	list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);

	/*
	 * If no transfer is running pick the first descriptor from the active
	 * list and start the transfer.
	 */
	if (!rchan->desc.running) {
		struct rcar_dmac_desc *desc;

		desc = list_first_entry(&rchan->desc.active,
					struct rcar_dmac_desc, node);
		rchan->desc.running = desc;

		rcar_dmac_chan_start_xfer(rchan);
	}

done:
	spin_unlock_irqrestore(&rchan->lock, flags);
}

/* Wait for in-flight interrupt handling on this channel's IRQ to finish. */
static void rcar_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	synchronize_irq(rchan->irq);
}

/* -----------------------------------------------------------------------------
 * IRQ handling
 */

/*
 * Handle a descriptor stage end (DSE) interrupt: reprogram the descriptor
 * interrupt pointer to the next stage and wake the IRQ thread, which invokes
 * the cyclic callback.
 */
static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	unsigned int stage;

	if (WARN_ON(!desc || !desc->cyclic)) {
		/*
		 * This should never happen, there should always be a running
		 * cyclic descriptor when a descriptor stage end interrupt is
		 * triggered. Warn and return.
		 */
		return IRQ_NONE;
	}

	/* Program the interrupt pointer to the next stage. */
	stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
		 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
	rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));

	return IRQ_WAKE_THREAD;
}

/*
 * Handle a transfer end (TE) interrupt: advance to the next chunk in
 * non-descriptor mode, wrap around for cyclic transfers, or complete the
 * descriptor and queue the next one. Returns IRQ_WAKE_THREAD when the IRQ
 * thread has callbacks or completions to process, IRQ_HANDLED otherwise.
 */
static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (WARN_ON_ONCE(!desc)) {
		/*
		 * This should never happen, there should always be a running
		 * descriptor when a transfer end interrupt is triggered. Warn
		 * and return.
		 */
		return IRQ_NONE;
	}

	/*
	 * The transfer end interrupt isn't generated for each chunk when using
	 * descriptor mode. Only update the running chunk pointer in
	 * non-descriptor mode.
	 */
	if (!desc->hwdescs.use) {
		/*
		 * If we haven't completed the last transfer chunk simply move
		 * to the next one. Only wake the IRQ thread if the transfer is
		 * cyclic.
		 */
		if (!list_is_last(&desc->running->node, &desc->chunks)) {
			desc->running = list_next_entry(desc->running, node);
			if (!desc->cyclic)
				ret = IRQ_HANDLED;
			goto done;
		}

		/*
		 * We've completed the last transfer chunk. If the transfer is
		 * cyclic, move back to the first one.
		 */
		if (desc->cyclic) {
			desc->running =
				list_first_entry(&desc->chunks,
						 struct rcar_dmac_xfer_chunk,
						 node);
			goto done;
		}
	}

	/* The descriptor is complete, move it to the done list. */
	list_move_tail(&desc->node, &chan->desc.done);

	/* Queue the next descriptor, if any. */
	if (!list_empty(&chan->desc.active))
		chan->desc.running = list_first_entry(&chan->desc.active,
						      struct rcar_dmac_desc,
						      node);
	else
		chan->desc.running = NULL;

done:
	/* Restart the hardware if another descriptor is ready to run. */
	if (chan->desc.running)
		rcar_dmac_chan_start_xfer(chan);

	return ret;
}

/*
 * Hard (top-half) channel interrupt handler. Runs under the channel spinlock;
 * clears the interrupt sources and defers callback invocation to the threaded
 * handler via IRQ_WAKE_THREAD.
 */
static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
{
	u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
	struct rcar_dmac_chan *chan = dev;
	irqreturn_t ret = IRQ_NONE;
	bool reinit = false;
	u32 chcr;

	spin_lock(&chan->lock);

	chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
	if (chcr & RCAR_DMACHCR_CAE) {
		struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device);

		/*
		 * We don't need to call rcar_dmac_chan_halt()
		 * because channel is already stopped in error case.
		 * We need to clear register and check DE bit as recovery.
		 */
		rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index);
		rcar_dmac_chcr_de_barrier(chan);
		reinit = true;
		goto spin_lock_end;
	}

	/* On transfer end also clear DE to stop the channel. */
	if (chcr & RCAR_DMACHCR_TE)
		mask |= RCAR_DMACHCR_DE;
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
	if (mask & RCAR_DMACHCR_DE)
		rcar_dmac_chcr_de_barrier(chan);

	if (chcr & RCAR_DMACHCR_DSE)
		ret |= rcar_dmac_isr_desc_stage_end(chan);

	if (chcr & RCAR_DMACHCR_TE)
		ret |= rcar_dmac_isr_transfer_end(chan);

spin_lock_end:
	spin_unlock(&chan->lock);

	/* Channel Address Error: reinitialize outside of the spinlock. */
	if (reinit) {
		dev_err(chan->chan.device->dev, "Channel Address Error\n");

		rcar_dmac_chan_reinit(chan);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/*
 * Threaded channel interrupt handler: invokes client callbacks for cyclic
 * transfers and completed descriptors, moves completed descriptors to the
 * ack wait list and recycles already-acked ones.
 */
static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
	struct rcar_dmac_chan *chan = dev;
	struct rcar_dmac_desc *desc;
	struct dmaengine_desc_callback cb;

	spin_lock_irq(&chan->lock);

	/* For cyclic transfers notify the user after every chunk. */
	if (chan->desc.running && chan->desc.running->cyclic) {
		desc = chan->desc.running;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);

		/* The callback is invoked without holding the channel lock. */
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}
	}

	/*
	 * Call the callback function for all descriptors on the done list and
	 * move them to the ack wait list.
	 */
	while (!list_empty(&chan->desc.done)) {
		desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
					node);
		dma_cookie_complete(&desc->async_tx);
		list_del(&desc->node);

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			/*
			 * We own the only reference to this descriptor, we can
			 * safely dereference it without holding the channel
			 * lock.
			 */
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}

		list_add_tail(&desc->node, &chan->desc.wait);
	}

	spin_unlock_irq(&chan->lock);

	/* Recycle all acked descriptors. */
	rcar_dmac_desc_recycle_acked(chan);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

/*
 * dma_request_channel() filter: accept only channels that belong to this
 * controller and atomically claim the requested MID/RID module.
 */
static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;

	/*
	 * FIXME: Using a filter on OF platforms is a nonsense. The OF xlate
	 * function knows from which device it wants to allocate a channel from,
	 * and would be perfectly capable of selecting the channel it wants.
	 * Forcing it to call dma_request_channel() and iterate through all
	 * channels from all controllers is just pointless.
	 */
	if (chan->device->device_config != rcar_dmac_device_config ||
	    dma_spec->np != chan->device->dev->of_node)
		return false;

	/* Claim the module; fails if it is already in use on this controller. */
	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
}

/*
 * Translate a one-cell DT DMA specifier (the MID/RID value) into a channel.
 */
static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct rcar_dmac_chan *rchan;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
	if (!chan)
		return NULL;

	rchan = to_rcar_dmac_chan(chan);
	rchan->mid_rid = dma_spec->args[0];

	return chan;
}

/* -----------------------------------------------------------------------------
 * Power management
 */

#ifdef CONFIG_PM
/* Nothing to save: the device is fully reinitialized on runtime resume. */
static int rcar_dmac_runtime_suspend(struct device *dev)
{
	return 0;
}

/* Reinitialize the DMAC registers after the device has been powered up. */
static int rcar_dmac_runtime_resume(struct device *dev)
{
	struct rcar_dmac *dmac = dev_get_drvdata(dev);

	return rcar_dmac_init(dmac);
}
#endif

static const struct dev_pm_ops rcar_dmac_pm = {
	/*
	 * TODO for system sleep/resume:
	 * - Wait for the current transfer to complete and stop the device,
	 * - Resume transfers, if any.
	 */
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
			   NULL)
};

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/*
 * Initialize one channel: register window, descriptor lists, dmaengine
 * bookkeeping and the per-channel interrupt.
 */
static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
				struct rcar_dmac_chan *rchan,
				unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct dma_chan *chan = &rchan->chan;
	char pdev_irqname[5];
	char *irqname;
	int ret;
173187244fe5SLaurent Pinchart 173287244fe5SLaurent Pinchart rchan->index = index; 173387244fe5SLaurent Pinchart rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index); 173487244fe5SLaurent Pinchart rchan->mid_rid = -EINVAL; 173587244fe5SLaurent Pinchart 173687244fe5SLaurent Pinchart spin_lock_init(&rchan->lock); 173787244fe5SLaurent Pinchart 1738f7638c90SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.free); 1739f7638c90SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.pending); 1740f7638c90SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.active); 1741f7638c90SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.done); 1742f7638c90SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.wait); 1743f7638c90SLaurent Pinchart 174487244fe5SLaurent Pinchart /* Request the channel interrupt. */ 174587244fe5SLaurent Pinchart sprintf(pdev_irqname, "ch%u", index); 1746427d5ecdSNiklas Söderlund rchan->irq = platform_get_irq_byname(pdev, pdev_irqname); 1747427d5ecdSNiklas Söderlund if (rchan->irq < 0) { 174887244fe5SLaurent Pinchart dev_err(dmac->dev, "no IRQ specified for channel %u\n", index); 174987244fe5SLaurent Pinchart return -ENODEV; 175087244fe5SLaurent Pinchart } 175187244fe5SLaurent Pinchart 175287244fe5SLaurent Pinchart irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", 175387244fe5SLaurent Pinchart dev_name(dmac->dev), index); 175487244fe5SLaurent Pinchart if (!irqname) 175587244fe5SLaurent Pinchart return -ENOMEM; 175687244fe5SLaurent Pinchart 17575e857047SKuninori Morimoto /* 17585e857047SKuninori Morimoto * Initialize the DMA engine channel and add it to the DMA engine 17595e857047SKuninori Morimoto * channels list. 
17605e857047SKuninori Morimoto */ 17615e857047SKuninori Morimoto chan->device = &dmac->engine; 17625e857047SKuninori Morimoto dma_cookie_init(chan); 17635e857047SKuninori Morimoto 17645e857047SKuninori Morimoto list_add_tail(&chan->device_node, &dmac->engine.channels); 17655e857047SKuninori Morimoto 1766427d5ecdSNiklas Söderlund ret = devm_request_threaded_irq(dmac->dev, rchan->irq, 1767427d5ecdSNiklas Söderlund rcar_dmac_isr_channel, 176887244fe5SLaurent Pinchart rcar_dmac_isr_channel_thread, 0, 176987244fe5SLaurent Pinchart irqname, rchan); 177087244fe5SLaurent Pinchart if (ret) { 1771427d5ecdSNiklas Söderlund dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", 1772427d5ecdSNiklas Söderlund rchan->irq, ret); 177387244fe5SLaurent Pinchart return ret; 177487244fe5SLaurent Pinchart } 177587244fe5SLaurent Pinchart 177687244fe5SLaurent Pinchart return 0; 177787244fe5SLaurent Pinchart } 177887244fe5SLaurent Pinchart 177987244fe5SLaurent Pinchart static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) 178087244fe5SLaurent Pinchart { 178187244fe5SLaurent Pinchart struct device_node *np = dev->of_node; 178287244fe5SLaurent Pinchart int ret; 178387244fe5SLaurent Pinchart 178487244fe5SLaurent Pinchart ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); 178587244fe5SLaurent Pinchart if (ret < 0) { 178687244fe5SLaurent Pinchart dev_err(dev, "unable to read dma-channels property\n"); 178787244fe5SLaurent Pinchart return ret; 178887244fe5SLaurent Pinchart } 178987244fe5SLaurent Pinchart 179087244fe5SLaurent Pinchart if (dmac->n_channels <= 0 || dmac->n_channels >= 100) { 179187244fe5SLaurent Pinchart dev_err(dev, "invalid number of channels %u\n", 179287244fe5SLaurent Pinchart dmac->n_channels); 179387244fe5SLaurent Pinchart return -EINVAL; 179487244fe5SLaurent Pinchart } 179587244fe5SLaurent Pinchart 179687244fe5SLaurent Pinchart return 0; 179787244fe5SLaurent Pinchart } 179887244fe5SLaurent Pinchart 179987244fe5SLaurent Pinchart static int 
rcar_dmac_probe(struct platform_device *pdev) 180087244fe5SLaurent Pinchart { 180187244fe5SLaurent Pinchart const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | 180287244fe5SLaurent Pinchart DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | 180387244fe5SLaurent Pinchart DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | 180487244fe5SLaurent Pinchart DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; 1805be6893e1SLaurent Pinchart unsigned int channels_offset = 0; 180687244fe5SLaurent Pinchart struct dma_device *engine; 180787244fe5SLaurent Pinchart struct rcar_dmac *dmac; 180887244fe5SLaurent Pinchart struct resource *mem; 180987244fe5SLaurent Pinchart unsigned int i; 181087244fe5SLaurent Pinchart int ret; 181187244fe5SLaurent Pinchart 181287244fe5SLaurent Pinchart dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); 181387244fe5SLaurent Pinchart if (!dmac) 181487244fe5SLaurent Pinchart return -ENOMEM; 181587244fe5SLaurent Pinchart 181687244fe5SLaurent Pinchart dmac->dev = &pdev->dev; 181787244fe5SLaurent Pinchart platform_set_drvdata(pdev, dmac); 181897d49c59SWolfram Sang dmac->dev->dma_parms = &dmac->parms; 181997d49c59SWolfram Sang dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); 1820dc312349SGeert Uytterhoeven dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); 182187244fe5SLaurent Pinchart 182287244fe5SLaurent Pinchart ret = rcar_dmac_parse_of(&pdev->dev, dmac); 182387244fe5SLaurent Pinchart if (ret < 0) 182487244fe5SLaurent Pinchart return ret; 182587244fe5SLaurent Pinchart 1826be6893e1SLaurent Pinchart /* 1827be6893e1SLaurent Pinchart * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be 1828be6893e1SLaurent Pinchart * flushed correctly, resulting in memory corruption. DMAC 0 channel 0 1829be6893e1SLaurent Pinchart * is connected to microTLB 0 on currently supported platforms, so we 1830be6893e1SLaurent Pinchart * can't use it with the IPMMU. 
As the IOMMU API operates at the device 1831be6893e1SLaurent Pinchart * level we can't disable it selectively, so ignore channel 0 for now if 1832be6893e1SLaurent Pinchart * the device is part of an IOMMU group. 1833be6893e1SLaurent Pinchart */ 1834f884f6eeSJoerg Roedel if (device_iommu_mapped(&pdev->dev)) { 1835be6893e1SLaurent Pinchart dmac->n_channels--; 1836be6893e1SLaurent Pinchart channels_offset = 1; 1837be6893e1SLaurent Pinchart } 1838be6893e1SLaurent Pinchart 183987244fe5SLaurent Pinchart dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, 184087244fe5SLaurent Pinchart sizeof(*dmac->channels), GFP_KERNEL); 184187244fe5SLaurent Pinchart if (!dmac->channels) 184287244fe5SLaurent Pinchart return -ENOMEM; 184387244fe5SLaurent Pinchart 184487244fe5SLaurent Pinchart /* Request resources. */ 184587244fe5SLaurent Pinchart mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 184687244fe5SLaurent Pinchart dmac->iomem = devm_ioremap_resource(&pdev->dev, mem); 184787244fe5SLaurent Pinchart if (IS_ERR(dmac->iomem)) 184887244fe5SLaurent Pinchart return PTR_ERR(dmac->iomem); 184987244fe5SLaurent Pinchart 185087244fe5SLaurent Pinchart /* Enable runtime PM and initialize the device. 
*/ 185187244fe5SLaurent Pinchart pm_runtime_enable(&pdev->dev); 185287244fe5SLaurent Pinchart ret = pm_runtime_get_sync(&pdev->dev); 185387244fe5SLaurent Pinchart if (ret < 0) { 185487244fe5SLaurent Pinchart dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); 185587244fe5SLaurent Pinchart return ret; 185687244fe5SLaurent Pinchart } 185787244fe5SLaurent Pinchart 185887244fe5SLaurent Pinchart ret = rcar_dmac_init(dmac); 185987244fe5SLaurent Pinchart pm_runtime_put(&pdev->dev); 186087244fe5SLaurent Pinchart 186187244fe5SLaurent Pinchart if (ret) { 186287244fe5SLaurent Pinchart dev_err(&pdev->dev, "failed to reset device\n"); 186387244fe5SLaurent Pinchart goto error; 186487244fe5SLaurent Pinchart } 186587244fe5SLaurent Pinchart 18665e857047SKuninori Morimoto /* Initialize engine */ 186787244fe5SLaurent Pinchart engine = &dmac->engine; 18685e857047SKuninori Morimoto 186987244fe5SLaurent Pinchart dma_cap_set(DMA_MEMCPY, engine->cap_mask); 187087244fe5SLaurent Pinchart dma_cap_set(DMA_SLAVE, engine->cap_mask); 187187244fe5SLaurent Pinchart 187287244fe5SLaurent Pinchart engine->dev = &pdev->dev; 187387244fe5SLaurent Pinchart engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); 187487244fe5SLaurent Pinchart 187587244fe5SLaurent Pinchart engine->src_addr_widths = widths; 187687244fe5SLaurent Pinchart engine->dst_addr_widths = widths; 187787244fe5SLaurent Pinchart engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); 187887244fe5SLaurent Pinchart engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 187987244fe5SLaurent Pinchart 188087244fe5SLaurent Pinchart engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; 188187244fe5SLaurent Pinchart engine->device_free_chan_resources = rcar_dmac_free_chan_resources; 188287244fe5SLaurent Pinchart engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; 188387244fe5SLaurent Pinchart engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; 188487244fe5SLaurent Pinchart 
engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; 188587244fe5SLaurent Pinchart engine->device_config = rcar_dmac_device_config; 18868115ce74SYoshihiro Shimoda engine->device_pause = rcar_dmac_chan_pause; 188787244fe5SLaurent Pinchart engine->device_terminate_all = rcar_dmac_chan_terminate_all; 188887244fe5SLaurent Pinchart engine->device_tx_status = rcar_dmac_tx_status; 188987244fe5SLaurent Pinchart engine->device_issue_pending = rcar_dmac_issue_pending; 189030c45005SNiklas Söderlund engine->device_synchronize = rcar_dmac_device_synchronize; 189187244fe5SLaurent Pinchart 18925e857047SKuninori Morimoto INIT_LIST_HEAD(&engine->channels); 18935e857047SKuninori Morimoto 18945e857047SKuninori Morimoto for (i = 0; i < dmac->n_channels; ++i) { 18955e857047SKuninori Morimoto ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], 18965e857047SKuninori Morimoto i + channels_offset); 18975e857047SKuninori Morimoto if (ret < 0) 18985e857047SKuninori Morimoto goto error; 18995e857047SKuninori Morimoto } 19005e857047SKuninori Morimoto 19015e857047SKuninori Morimoto /* Register the DMAC as a DMA provider for DT. */ 19025e857047SKuninori Morimoto ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, 19035e857047SKuninori Morimoto NULL); 19045e857047SKuninori Morimoto if (ret < 0) 19055e857047SKuninori Morimoto goto error; 19065e857047SKuninori Morimoto 19075e857047SKuninori Morimoto /* 19085e857047SKuninori Morimoto * Register the DMA engine device. 19095e857047SKuninori Morimoto * 19105e857047SKuninori Morimoto * Default transfer size of 32 bytes requires 32-byte alignment. 
19115e857047SKuninori Morimoto */ 191287244fe5SLaurent Pinchart ret = dma_async_device_register(engine); 191387244fe5SLaurent Pinchart if (ret < 0) 191487244fe5SLaurent Pinchart goto error; 191587244fe5SLaurent Pinchart 191687244fe5SLaurent Pinchart return 0; 191787244fe5SLaurent Pinchart 191887244fe5SLaurent Pinchart error: 191987244fe5SLaurent Pinchart of_dma_controller_free(pdev->dev.of_node); 192087244fe5SLaurent Pinchart pm_runtime_disable(&pdev->dev); 192187244fe5SLaurent Pinchart return ret; 192287244fe5SLaurent Pinchart } 192387244fe5SLaurent Pinchart 192487244fe5SLaurent Pinchart static int rcar_dmac_remove(struct platform_device *pdev) 192587244fe5SLaurent Pinchart { 192687244fe5SLaurent Pinchart struct rcar_dmac *dmac = platform_get_drvdata(pdev); 192787244fe5SLaurent Pinchart 192887244fe5SLaurent Pinchart of_dma_controller_free(pdev->dev.of_node); 192987244fe5SLaurent Pinchart dma_async_device_unregister(&dmac->engine); 193087244fe5SLaurent Pinchart 193187244fe5SLaurent Pinchart pm_runtime_disable(&pdev->dev); 193287244fe5SLaurent Pinchart 193387244fe5SLaurent Pinchart return 0; 193487244fe5SLaurent Pinchart } 193587244fe5SLaurent Pinchart 193687244fe5SLaurent Pinchart static void rcar_dmac_shutdown(struct platform_device *pdev) 193787244fe5SLaurent Pinchart { 193887244fe5SLaurent Pinchart struct rcar_dmac *dmac = platform_get_drvdata(pdev); 193987244fe5SLaurent Pinchart 19409203dbecSKuninori Morimoto rcar_dmac_stop_all_chan(dmac); 194187244fe5SLaurent Pinchart } 194287244fe5SLaurent Pinchart 194387244fe5SLaurent Pinchart static const struct of_device_id rcar_dmac_of_ids[] = { 194487244fe5SLaurent Pinchart { .compatible = "renesas,rcar-dmac", }, 194587244fe5SLaurent Pinchart { /* Sentinel */ } 194687244fe5SLaurent Pinchart }; 194787244fe5SLaurent Pinchart MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids); 194887244fe5SLaurent Pinchart 194987244fe5SLaurent Pinchart static struct platform_driver rcar_dmac_driver = { 195087244fe5SLaurent Pinchart .driver = { 
195187244fe5SLaurent Pinchart .pm = &rcar_dmac_pm, 195287244fe5SLaurent Pinchart .name = "rcar-dmac", 195387244fe5SLaurent Pinchart .of_match_table = rcar_dmac_of_ids, 195487244fe5SLaurent Pinchart }, 195587244fe5SLaurent Pinchart .probe = rcar_dmac_probe, 195687244fe5SLaurent Pinchart .remove = rcar_dmac_remove, 195787244fe5SLaurent Pinchart .shutdown = rcar_dmac_shutdown, 195887244fe5SLaurent Pinchart }; 195987244fe5SLaurent Pinchart 196087244fe5SLaurent Pinchart module_platform_driver(rcar_dmac_driver); 196187244fe5SLaurent Pinchart 196287244fe5SLaurent Pinchart MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver"); 196387244fe5SLaurent Pinchart MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); 196487244fe5SLaurent Pinchart MODULE_LICENSE("GPL v2"); 1965