187244fe5SLaurent Pinchart /* 287244fe5SLaurent Pinchart * Renesas R-Car Gen2 DMA Controller Driver 387244fe5SLaurent Pinchart * 487244fe5SLaurent Pinchart * Copyright (C) 2014 Renesas Electronics Inc. 587244fe5SLaurent Pinchart * 687244fe5SLaurent Pinchart * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 787244fe5SLaurent Pinchart * 887244fe5SLaurent Pinchart * This is free software; you can redistribute it and/or modify 987244fe5SLaurent Pinchart * it under the terms of version 2 of the GNU General Public License as 1087244fe5SLaurent Pinchart * published by the Free Software Foundation. 1187244fe5SLaurent Pinchart */ 1287244fe5SLaurent Pinchart 13ccadee9bSLaurent Pinchart #include <linux/dma-mapping.h> 1487244fe5SLaurent Pinchart #include <linux/dmaengine.h> 1587244fe5SLaurent Pinchart #include <linux/interrupt.h> 1687244fe5SLaurent Pinchart #include <linux/list.h> 1787244fe5SLaurent Pinchart #include <linux/module.h> 1887244fe5SLaurent Pinchart #include <linux/mutex.h> 1987244fe5SLaurent Pinchart #include <linux/of.h> 2087244fe5SLaurent Pinchart #include <linux/of_dma.h> 2187244fe5SLaurent Pinchart #include <linux/of_platform.h> 2287244fe5SLaurent Pinchart #include <linux/platform_device.h> 2387244fe5SLaurent Pinchart #include <linux/pm_runtime.h> 2487244fe5SLaurent Pinchart #include <linux/slab.h> 2587244fe5SLaurent Pinchart #include <linux/spinlock.h> 2687244fe5SLaurent Pinchart 2787244fe5SLaurent Pinchart #include "../dmaengine.h" 2887244fe5SLaurent Pinchart 2987244fe5SLaurent Pinchart /* 3087244fe5SLaurent Pinchart * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer 3187244fe5SLaurent Pinchart * @node: entry in the parent's chunks list 3287244fe5SLaurent Pinchart * @src_addr: device source address 3387244fe5SLaurent Pinchart * @dst_addr: device destination address 3487244fe5SLaurent Pinchart * @size: transfer size in bytes 3587244fe5SLaurent Pinchart */ 3687244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk { 3787244fe5SLaurent Pinchart struct list_head node; 3887244fe5SLaurent Pinchart 3987244fe5SLaurent Pinchart dma_addr_t src_addr; 4087244fe5SLaurent Pinchart dma_addr_t dst_addr; 4187244fe5SLaurent Pinchart u32 size; 4287244fe5SLaurent Pinchart }; 4387244fe5SLaurent Pinchart 4487244fe5SLaurent Pinchart /* 45ccadee9bSLaurent Pinchart * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk 46ccadee9bSLaurent Pinchart * @sar: value of the SAR register (source address) 47ccadee9bSLaurent Pinchart * @dar: value of the DAR register (destination address) 48ccadee9bSLaurent Pinchart * @tcr: value of the TCR register (transfer count) 49ccadee9bSLaurent Pinchart */ 50ccadee9bSLaurent Pinchart struct rcar_dmac_hw_desc { 51ccadee9bSLaurent Pinchart u32 sar; 52ccadee9bSLaurent Pinchart u32 dar; 53ccadee9bSLaurent Pinchart u32 tcr; 54ccadee9bSLaurent Pinchart u32 reserved; 55ccadee9bSLaurent Pinchart } __attribute__((__packed__)); 56ccadee9bSLaurent Pinchart 57ccadee9bSLaurent Pinchart /* 5887244fe5SLaurent Pinchart * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor 5987244fe5SLaurent Pinchart * @async_tx: base DMA asynchronous transaction descriptor 6087244fe5SLaurent Pinchart * @direction: direction of the DMA transfer 6187244fe5SLaurent Pinchart * @xfer_shift: log2 of the transfer size 6287244fe5SLaurent Pinchart * @chcr: value of the channel configuration register for this transfer 6387244fe5SLaurent Pinchart * @node: entry in the channel's descriptors lists 6487244fe5SLaurent Pinchart * @chunks: list of transfer chunks for this transfer 6587244fe5SLaurent Pinchart * @running: the transfer chunk being currently processed 66ccadee9bSLaurent Pinchart * @nchunks: number of transfer chunks for this transfer 671ed1315fSLaurent Pinchart * @hwdescs.use: whether the transfer descriptor uses hardware descriptors 68ccadee9bSLaurent Pinchart * @hwdescs.mem: hardware descriptors memory for the transfer 69ccadee9bSLaurent Pinchart * @hwdescs.dma: device address of the hardware descriptors memory 70ccadee9bSLaurent Pinchart * @hwdescs.size: size of the hardware descriptors in bytes 7187244fe5SLaurent Pinchart * @size: transfer size in bytes 7287244fe5SLaurent Pinchart * @cyclic: when set indicates that the DMA transfer is cyclic 7387244fe5SLaurent Pinchart */ 7487244fe5SLaurent Pinchart struct rcar_dmac_desc { 7587244fe5SLaurent Pinchart struct dma_async_tx_descriptor async_tx; 7687244fe5SLaurent Pinchart enum dma_transfer_direction direction; 7787244fe5SLaurent Pinchart unsigned int xfer_shift; 7887244fe5SLaurent Pinchart u32 chcr; 7987244fe5SLaurent Pinchart 8087244fe5SLaurent Pinchart struct list_head node; 8187244fe5SLaurent Pinchart struct list_head chunks; 8287244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk *running; 83ccadee9bSLaurent Pinchart unsigned int nchunks; 84ccadee9bSLaurent Pinchart 85ccadee9bSLaurent Pinchart struct { 861ed1315fSLaurent Pinchart bool use; 87ccadee9bSLaurent Pinchart struct rcar_dmac_hw_desc *mem; 88ccadee9bSLaurent Pinchart dma_addr_t dma; 89ccadee9bSLaurent Pinchart size_t size; 90ccadee9bSLaurent Pinchart } hwdescs; 9187244fe5SLaurent Pinchart 9287244fe5SLaurent Pinchart unsigned int size; 9387244fe5SLaurent Pinchart bool cyclic; 9487244fe5SLaurent Pinchart }; 9587244fe5SLaurent Pinchart 9687244fe5SLaurent Pinchart #define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx) 9787244fe5SLaurent Pinchart 9887244fe5SLaurent Pinchart /* 9987244fe5SLaurent Pinchart * struct rcar_dmac_desc_page - One page worth of descriptors 10087244fe5SLaurent Pinchart * @node: entry in the channel's pages list 10187244fe5SLaurent Pinchart * @descs: array of DMA descriptors 10287244fe5SLaurent Pinchart * @chunks: array of transfer chunk descriptors 10387244fe5SLaurent Pinchart */ 10487244fe5SLaurent Pinchart struct rcar_dmac_desc_page { 10587244fe5SLaurent Pinchart struct list_head node; 10687244fe5SLaurent Pinchart 10787244fe5SLaurent Pinchart union { 10887244fe5SLaurent Pinchart struct rcar_dmac_desc descs[0]; 10987244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk chunks[0]; 11087244fe5SLaurent Pinchart }; 11187244fe5SLaurent Pinchart }; 11287244fe5SLaurent Pinchart 11387244fe5SLaurent Pinchart #define RCAR_DMAC_DESCS_PER_PAGE \ 11487244fe5SLaurent Pinchart ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \ 11587244fe5SLaurent Pinchart sizeof(struct rcar_dmac_desc)) 11687244fe5SLaurent Pinchart #define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \ 11787244fe5SLaurent Pinchart ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \ 11887244fe5SLaurent Pinchart sizeof(struct rcar_dmac_xfer_chunk)) 11987244fe5SLaurent Pinchart 12087244fe5SLaurent Pinchart /* 12187244fe5SLaurent Pinchart * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel 12287244fe5SLaurent Pinchart * @chan: base DMA channel object 12387244fe5SLaurent Pinchart * @iomem: channel I/O memory base 12487244fe5SLaurent Pinchart * @index: index of this channel in the controller 12587244fe5SLaurent Pinchart * @src_xfer_size: size (in bytes) of hardware transfers on the source side 12687244fe5SLaurent Pinchart * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side 12787244fe5SLaurent Pinchart * @src_slave_addr: slave source memory address 12887244fe5SLaurent Pinchart * @dst_slave_addr: slave destination memory address 12987244fe5SLaurent Pinchart * @mid_rid: hardware MID/RID for the DMA client using this channel 13087244fe5SLaurent Pinchart * @lock: protects the channel CHCR register and the desc members 13187244fe5SLaurent Pinchart * @desc.free: list of free descriptors 13287244fe5SLaurent Pinchart * @desc.pending: list of pending descriptors (submitted with tx_submit) 13387244fe5SLaurent Pinchart * @desc.active: list of active descriptors (activated with issue_pending) 13487244fe5SLaurent Pinchart * @desc.done: list of completed descriptors 13587244fe5SLaurent Pinchart * @desc.wait: list of descriptors waiting for an ack 13687244fe5SLaurent Pinchart * @desc.running: the descriptor being processed (a member of the active list) 13787244fe5SLaurent Pinchart * @desc.chunks_free: list of free transfer chunk descriptors 13887244fe5SLaurent Pinchart * @desc.pages: list of pages used by allocated descriptors 13987244fe5SLaurent Pinchart */ 14087244fe5SLaurent Pinchart struct rcar_dmac_chan { 14187244fe5SLaurent Pinchart struct dma_chan chan; 14287244fe5SLaurent Pinchart void __iomem *iomem; 14387244fe5SLaurent Pinchart unsigned int index; 14487244fe5SLaurent Pinchart 14587244fe5SLaurent Pinchart unsigned int src_xfer_size; 14687244fe5SLaurent Pinchart unsigned int dst_xfer_size; 14787244fe5SLaurent Pinchart dma_addr_t src_slave_addr; 14887244fe5SLaurent Pinchart dma_addr_t dst_slave_addr; 14987244fe5SLaurent Pinchart int mid_rid; 15087244fe5SLaurent Pinchart 15187244fe5SLaurent Pinchart spinlock_t lock; 15287244fe5SLaurent Pinchart 15387244fe5SLaurent Pinchart struct { 15487244fe5SLaurent Pinchart struct list_head free; 15587244fe5SLaurent Pinchart struct list_head pending; 15687244fe5SLaurent Pinchart struct list_head active; 15787244fe5SLaurent Pinchart struct list_head done; 15887244fe5SLaurent Pinchart struct list_head wait; 15987244fe5SLaurent Pinchart struct rcar_dmac_desc *running; 16087244fe5SLaurent Pinchart 16187244fe5SLaurent Pinchart struct list_head chunks_free; 16287244fe5SLaurent Pinchart 16387244fe5SLaurent Pinchart struct list_head pages; 16487244fe5SLaurent Pinchart } desc; 16587244fe5SLaurent Pinchart }; 16687244fe5SLaurent Pinchart 16787244fe5SLaurent Pinchart #define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan) 16887244fe5SLaurent Pinchart 16987244fe5SLaurent Pinchart /* 17087244fe5SLaurent Pinchart * struct rcar_dmac - R-Car Gen2 DMA Controller 17187244fe5SLaurent Pinchart * @engine: base DMA engine object 17287244fe5SLaurent Pinchart * @dev: the hardware device 17387244fe5SLaurent Pinchart * @iomem: remapped I/O memory base 17487244fe5SLaurent Pinchart * @n_channels: number of available channels 17587244fe5SLaurent Pinchart * @channels: array of DMAC channels 17687244fe5SLaurent Pinchart * @modules: bitmask of client modules in use 17787244fe5SLaurent Pinchart */ 17887244fe5SLaurent Pinchart struct rcar_dmac { 17987244fe5SLaurent Pinchart struct dma_device engine; 18087244fe5SLaurent Pinchart struct device *dev; 18187244fe5SLaurent Pinchart void __iomem *iomem; 18287244fe5SLaurent Pinchart 18387244fe5SLaurent Pinchart unsigned int n_channels; 18487244fe5SLaurent Pinchart struct rcar_dmac_chan *channels; 18587244fe5SLaurent Pinchart 18687244fe5SLaurent Pinchart unsigned long modules[256 / BITS_PER_LONG]; 18787244fe5SLaurent Pinchart }; 18887244fe5SLaurent Pinchart 18987244fe5SLaurent Pinchart #define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine) 19087244fe5SLaurent Pinchart 19187244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 19287244fe5SLaurent Pinchart * Registers 19387244fe5SLaurent Pinchart */ 19487244fe5SLaurent Pinchart 19587244fe5SLaurent Pinchart #define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i)) 19687244fe5SLaurent Pinchart 19787244fe5SLaurent Pinchart #define RCAR_DMAISTA 0x0020 19887244fe5SLaurent Pinchart #define RCAR_DMASEC 0x0030 19987244fe5SLaurent Pinchart #define RCAR_DMAOR 0x0060 20087244fe5SLaurent Pinchart #define RCAR_DMAOR_PRI_FIXED (0 << 8) 20187244fe5SLaurent Pinchart #define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8) 20287244fe5SLaurent Pinchart #define RCAR_DMAOR_AE (1 << 2) 20387244fe5SLaurent Pinchart #define RCAR_DMAOR_DME (1 << 0) 20487244fe5SLaurent Pinchart #define RCAR_DMACHCLR 0x0080 20587244fe5SLaurent Pinchart #define RCAR_DMADPSEC 0x00a0 20687244fe5SLaurent Pinchart 20787244fe5SLaurent Pinchart #define RCAR_DMASAR 0x0000 20887244fe5SLaurent Pinchart #define RCAR_DMADAR 0x0004 20987244fe5SLaurent Pinchart #define RCAR_DMATCR 0x0008 21087244fe5SLaurent Pinchart #define RCAR_DMATCR_MASK 0x00ffffff 21187244fe5SLaurent Pinchart #define RCAR_DMATSR 0x0028 21287244fe5SLaurent Pinchart #define RCAR_DMACHCR 0x000c 21387244fe5SLaurent Pinchart #define RCAR_DMACHCR_CAE (1 << 31) 21487244fe5SLaurent Pinchart #define RCAR_DMACHCR_CAIE (1 << 30) 21587244fe5SLaurent Pinchart #define RCAR_DMACHCR_DPM_DISABLED (0 << 28) 21687244fe5SLaurent Pinchart #define RCAR_DMACHCR_DPM_ENABLED (1 << 28) 21787244fe5SLaurent Pinchart #define RCAR_DMACHCR_DPM_REPEAT (2 << 28) 21887244fe5SLaurent Pinchart #define RCAR_DMACHCR_DPM_INFINITE (3 << 28) 21987244fe5SLaurent Pinchart #define RCAR_DMACHCR_RPT_SAR (1 << 27) 22087244fe5SLaurent Pinchart #define RCAR_DMACHCR_RPT_DAR (1 << 26) 22187244fe5SLaurent Pinchart #define RCAR_DMACHCR_RPT_TCR (1 << 25) 22287244fe5SLaurent Pinchart #define RCAR_DMACHCR_DPB (1 << 22) 22387244fe5SLaurent Pinchart #define RCAR_DMACHCR_DSE (1 << 19) 22487244fe5SLaurent Pinchart #define RCAR_DMACHCR_DSIE (1 << 18) 22587244fe5SLaurent Pinchart #define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3)) 22687244fe5SLaurent Pinchart #define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3)) 22787244fe5SLaurent Pinchart #define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3)) 22887244fe5SLaurent Pinchart #define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3)) 22987244fe5SLaurent Pinchart #define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3)) 23087244fe5SLaurent Pinchart #define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3)) 23187244fe5SLaurent Pinchart #define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3)) 23287244fe5SLaurent Pinchart #define RCAR_DMACHCR_DM_FIXED (0 << 14) 23387244fe5SLaurent Pinchart #define RCAR_DMACHCR_DM_INC (1 << 14) 23487244fe5SLaurent Pinchart #define RCAR_DMACHCR_DM_DEC (2 << 14) 23587244fe5SLaurent Pinchart #define RCAR_DMACHCR_SM_FIXED (0 << 12) 23687244fe5SLaurent Pinchart #define RCAR_DMACHCR_SM_INC (1 << 12) 23787244fe5SLaurent Pinchart #define RCAR_DMACHCR_SM_DEC (2 << 12) 23887244fe5SLaurent Pinchart #define RCAR_DMACHCR_RS_AUTO (4 << 8) 23987244fe5SLaurent Pinchart #define RCAR_DMACHCR_RS_DMARS (8 << 8) 24087244fe5SLaurent Pinchart #define RCAR_DMACHCR_IE (1 << 2) 24187244fe5SLaurent Pinchart #define RCAR_DMACHCR_TE (1 << 1) 24287244fe5SLaurent Pinchart #define RCAR_DMACHCR_DE (1 << 0) 24387244fe5SLaurent Pinchart #define RCAR_DMATCRB 0x0018 24487244fe5SLaurent Pinchart #define RCAR_DMATSRB 0x0038 24587244fe5SLaurent Pinchart #define RCAR_DMACHCRB 0x001c 24687244fe5SLaurent Pinchart #define RCAR_DMACHCRB_DCNT(n) ((n) << 24) 247ccadee9bSLaurent Pinchart #define RCAR_DMACHCRB_DPTR_MASK (0xff << 16) 248ccadee9bSLaurent Pinchart #define RCAR_DMACHCRB_DPTR_SHIFT 16 24987244fe5SLaurent Pinchart #define RCAR_DMACHCRB_DRST (1 << 15) 25087244fe5SLaurent Pinchart #define RCAR_DMACHCRB_DTS (1 << 8) 25187244fe5SLaurent Pinchart #define RCAR_DMACHCRB_SLM_NORMAL (0 << 4) 25287244fe5SLaurent Pinchart #define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4) 25387244fe5SLaurent Pinchart #define RCAR_DMACHCRB_PRI(n) ((n) << 0) 25487244fe5SLaurent Pinchart #define RCAR_DMARS 0x0040 25587244fe5SLaurent Pinchart #define RCAR_DMABUFCR 0x0048 25687244fe5SLaurent Pinchart #define RCAR_DMABUFCR_MBU(n) ((n) << 16) 25787244fe5SLaurent Pinchart #define RCAR_DMABUFCR_ULB(n) ((n) << 0) 25887244fe5SLaurent Pinchart #define RCAR_DMADPBASE 0x0050 25987244fe5SLaurent Pinchart #define RCAR_DMADPBASE_MASK 0xfffffff0 26087244fe5SLaurent Pinchart #define RCAR_DMADPBASE_SEL (1 << 0) 26187244fe5SLaurent Pinchart #define RCAR_DMADPCR 0x0054 26287244fe5SLaurent Pinchart #define RCAR_DMADPCR_DIPT(n) ((n) << 24) 26387244fe5SLaurent Pinchart #define RCAR_DMAFIXSAR 0x0010 26487244fe5SLaurent Pinchart #define RCAR_DMAFIXDAR 0x0014 26587244fe5SLaurent Pinchart #define RCAR_DMAFIXDPBASE 0x0060 26687244fe5SLaurent Pinchart 26787244fe5SLaurent Pinchart /* Hardcode the MEMCPY transfer size to 4 bytes. */ 26887244fe5SLaurent Pinchart #define RCAR_DMAC_MEMCPY_XFER_SIZE 4 26987244fe5SLaurent Pinchart 27087244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 27187244fe5SLaurent Pinchart * Device access 27287244fe5SLaurent Pinchart */ 27387244fe5SLaurent Pinchart 27487244fe5SLaurent Pinchart static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data) 27587244fe5SLaurent Pinchart { 27687244fe5SLaurent Pinchart if (reg == RCAR_DMAOR) 27787244fe5SLaurent Pinchart writew(data, dmac->iomem + reg); 27887244fe5SLaurent Pinchart else 27987244fe5SLaurent Pinchart writel(data, dmac->iomem + reg); 28087244fe5SLaurent Pinchart } 28187244fe5SLaurent Pinchart 28287244fe5SLaurent Pinchart static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg) 28387244fe5SLaurent Pinchart { 28487244fe5SLaurent Pinchart if (reg == RCAR_DMAOR) 28587244fe5SLaurent Pinchart return readw(dmac->iomem + reg); 28687244fe5SLaurent Pinchart else 28787244fe5SLaurent Pinchart return readl(dmac->iomem + reg); 28887244fe5SLaurent Pinchart } 28987244fe5SLaurent Pinchart 29087244fe5SLaurent Pinchart static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg) 29187244fe5SLaurent Pinchart { 29287244fe5SLaurent Pinchart if (reg == RCAR_DMARS) 29387244fe5SLaurent Pinchart return readw(chan->iomem + reg); 29487244fe5SLaurent Pinchart else 29587244fe5SLaurent Pinchart return readl(chan->iomem + reg); 29687244fe5SLaurent Pinchart } 29787244fe5SLaurent Pinchart 29887244fe5SLaurent Pinchart static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data) 29987244fe5SLaurent Pinchart { 30087244fe5SLaurent Pinchart if (reg == RCAR_DMARS) 30187244fe5SLaurent Pinchart writew(data, chan->iomem + reg); 30287244fe5SLaurent Pinchart else 30387244fe5SLaurent Pinchart writel(data, chan->iomem + reg); 30487244fe5SLaurent Pinchart } 30587244fe5SLaurent Pinchart 30687244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 30787244fe5SLaurent Pinchart * Initialization and configuration 30887244fe5SLaurent Pinchart */ 30987244fe5SLaurent Pinchart 31087244fe5SLaurent Pinchart static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan) 31187244fe5SLaurent Pinchart { 31287244fe5SLaurent Pinchart u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 31387244fe5SLaurent Pinchart 31487244fe5SLaurent Pinchart return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE; 31587244fe5SLaurent Pinchart } 31687244fe5SLaurent Pinchart 31787244fe5SLaurent Pinchart static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) 31887244fe5SLaurent Pinchart { 31987244fe5SLaurent Pinchart struct rcar_dmac_desc *desc = chan->desc.running; 320ccadee9bSLaurent Pinchart u32 chcr = desc->chcr; 321ccadee9bSLaurent Pinchart 322ccadee9bSLaurent Pinchart WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan)); 323ccadee9bSLaurent Pinchart 324ccadee9bSLaurent Pinchart if (chan->mid_rid >= 0) 325ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); 326ccadee9bSLaurent Pinchart 3271ed1315fSLaurent Pinchart if (desc->hwdescs.use) { 328ccadee9bSLaurent Pinchart dev_dbg(chan->chan.device->dev, 329ccadee9bSLaurent Pinchart "chan%u: queue desc %p: %u@%pad\n", 330ccadee9bSLaurent Pinchart chan->index, desc, desc->nchunks, &desc->hwdescs.dma); 331ccadee9bSLaurent Pinchart 332ccadee9bSLaurent Pinchart #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 333ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE, 334ccadee9bSLaurent Pinchart desc->hwdescs.dma >> 32); 335ccadee9bSLaurent Pinchart #endif 336ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMADPBASE, 337ccadee9bSLaurent Pinchart (desc->hwdescs.dma & 0xfffffff0) | 338ccadee9bSLaurent Pinchart RCAR_DMADPBASE_SEL); 339ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMACHCRB, 340ccadee9bSLaurent Pinchart RCAR_DMACHCRB_DCNT(desc->nchunks - 1) | 341ccadee9bSLaurent Pinchart RCAR_DMACHCRB_DRST); 342ccadee9bSLaurent Pinchart 343ccadee9bSLaurent Pinchart /* 344ccadee9bSLaurent Pinchart * Program the descriptor stage interrupt to occur after the end 345ccadee9bSLaurent Pinchart * of the first stage. 346ccadee9bSLaurent Pinchart */ 347ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1)); 348ccadee9bSLaurent Pinchart 349ccadee9bSLaurent Pinchart chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR 350ccadee9bSLaurent Pinchart | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB; 351ccadee9bSLaurent Pinchart 352ccadee9bSLaurent Pinchart /* 353ccadee9bSLaurent Pinchart * If the descriptor isn't cyclic enable normal descriptor mode 354ccadee9bSLaurent Pinchart * and the transfer completion interrupt. 355ccadee9bSLaurent Pinchart */ 356ccadee9bSLaurent Pinchart if (!desc->cyclic) 357ccadee9bSLaurent Pinchart chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE; 358ccadee9bSLaurent Pinchart /* 359ccadee9bSLaurent Pinchart * If the descriptor is cyclic and has a callback enable the 360ccadee9bSLaurent Pinchart * descriptor stage interrupt in infinite repeat mode. 361ccadee9bSLaurent Pinchart */ 362ccadee9bSLaurent Pinchart else if (desc->async_tx.callback) 363ccadee9bSLaurent Pinchart chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE; 364ccadee9bSLaurent Pinchart /* 365ccadee9bSLaurent Pinchart * Otherwise just select infinite repeat mode without any 366ccadee9bSLaurent Pinchart * interrupt. 367ccadee9bSLaurent Pinchart */ 368ccadee9bSLaurent Pinchart else 369ccadee9bSLaurent Pinchart chcr |= RCAR_DMACHCR_DPM_INFINITE; 370ccadee9bSLaurent Pinchart } else { 37187244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk *chunk = desc->running; 37287244fe5SLaurent Pinchart 37387244fe5SLaurent Pinchart dev_dbg(chan->chan.device->dev, 37487244fe5SLaurent Pinchart "chan%u: queue chunk %p: %u@%pad -> %pad\n", 37587244fe5SLaurent Pinchart chan->index, chunk, chunk->size, &chunk->src_addr, 37687244fe5SLaurent Pinchart &chunk->dst_addr); 37787244fe5SLaurent Pinchart 37887244fe5SLaurent Pinchart #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 379ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR, 380ccadee9bSLaurent Pinchart chunk->src_addr >> 32); 381ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR, 382ccadee9bSLaurent Pinchart chunk->dst_addr >> 32); 38387244fe5SLaurent Pinchart #endif 384ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMASAR, 385ccadee9bSLaurent Pinchart chunk->src_addr & 0xffffffff); 386ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMADAR, 387ccadee9bSLaurent Pinchart chunk->dst_addr & 0xffffffff); 38887244fe5SLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMATCR, 38987244fe5SLaurent Pinchart chunk->size >> desc->xfer_shift); 39087244fe5SLaurent Pinchart 391ccadee9bSLaurent Pinchart chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE; 392ccadee9bSLaurent Pinchart } 393ccadee9bSLaurent Pinchart 394ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE); 39587244fe5SLaurent Pinchart } 39687244fe5SLaurent Pinchart 39787244fe5SLaurent Pinchart static int rcar_dmac_init(struct rcar_dmac *dmac) 39887244fe5SLaurent Pinchart { 39987244fe5SLaurent Pinchart u16 dmaor; 40087244fe5SLaurent Pinchart 40187244fe5SLaurent Pinchart /* Clear all channels and enable the DMAC globally. */ 40287244fe5SLaurent Pinchart rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff); 40387244fe5SLaurent Pinchart rcar_dmac_write(dmac, RCAR_DMAOR, 40487244fe5SLaurent Pinchart RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME); 40587244fe5SLaurent Pinchart 40687244fe5SLaurent Pinchart dmaor = rcar_dmac_read(dmac, RCAR_DMAOR); 40787244fe5SLaurent Pinchart if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) { 40887244fe5SLaurent Pinchart dev_warn(dmac->dev, "DMAOR initialization failed.\n"); 40987244fe5SLaurent Pinchart return -EIO; 41087244fe5SLaurent Pinchart } 41187244fe5SLaurent Pinchart 41287244fe5SLaurent Pinchart return 0; 41387244fe5SLaurent Pinchart } 41487244fe5SLaurent Pinchart 41587244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 41687244fe5SLaurent Pinchart * Descriptors submission 41787244fe5SLaurent Pinchart */ 41887244fe5SLaurent Pinchart 41987244fe5SLaurent Pinchart static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx) 42087244fe5SLaurent Pinchart { 42187244fe5SLaurent Pinchart struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan); 42287244fe5SLaurent Pinchart struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx); 42387244fe5SLaurent Pinchart unsigned long flags; 42487244fe5SLaurent Pinchart dma_cookie_t cookie; 42587244fe5SLaurent Pinchart 42687244fe5SLaurent Pinchart spin_lock_irqsave(&chan->lock, flags); 42787244fe5SLaurent Pinchart 42887244fe5SLaurent Pinchart cookie = dma_cookie_assign(tx); 42987244fe5SLaurent Pinchart 43087244fe5SLaurent Pinchart dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n", 43187244fe5SLaurent Pinchart chan->index, tx->cookie, desc); 43287244fe5SLaurent Pinchart 43387244fe5SLaurent Pinchart list_add_tail(&desc->node, &chan->desc.pending); 43487244fe5SLaurent Pinchart desc->running = list_first_entry(&desc->chunks, 43587244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk, node); 43687244fe5SLaurent Pinchart 43787244fe5SLaurent Pinchart spin_unlock_irqrestore(&chan->lock, flags); 43887244fe5SLaurent Pinchart 43987244fe5SLaurent Pinchart return cookie; 44087244fe5SLaurent Pinchart } 44187244fe5SLaurent Pinchart 44287244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 44387244fe5SLaurent Pinchart * Descriptors allocation and free 44487244fe5SLaurent Pinchart */ 44587244fe5SLaurent Pinchart 44687244fe5SLaurent Pinchart /* 44787244fe5SLaurent Pinchart * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors 44887244fe5SLaurent Pinchart * @chan: the DMA channel 44987244fe5SLaurent Pinchart * @gfp: allocation flags 45087244fe5SLaurent Pinchart */ 45187244fe5SLaurent Pinchart static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) 45287244fe5SLaurent Pinchart { 45387244fe5SLaurent Pinchart struct rcar_dmac_desc_page *page; 45487244fe5SLaurent Pinchart LIST_HEAD(list); 45587244fe5SLaurent Pinchart unsigned int i; 45687244fe5SLaurent Pinchart 45787244fe5SLaurent Pinchart page = (void *)get_zeroed_page(gfp); 45887244fe5SLaurent Pinchart if (!page) 45987244fe5SLaurent Pinchart return -ENOMEM; 46087244fe5SLaurent Pinchart 46187244fe5SLaurent Pinchart for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) { 46287244fe5SLaurent Pinchart struct rcar_dmac_desc *desc = &page->descs[i]; 46387244fe5SLaurent Pinchart 46487244fe5SLaurent Pinchart dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); 46587244fe5SLaurent Pinchart desc->async_tx.tx_submit = rcar_dmac_tx_submit; 46687244fe5SLaurent Pinchart INIT_LIST_HEAD(&desc->chunks); 46787244fe5SLaurent Pinchart 46887244fe5SLaurent Pinchart list_add_tail(&desc->node, &list); 46987244fe5SLaurent Pinchart } 47087244fe5SLaurent Pinchart 47187244fe5SLaurent Pinchart spin_lock_irq(&chan->lock); 47287244fe5SLaurent Pinchart list_splice_tail(&list, &chan->desc.free); 47387244fe5SLaurent Pinchart list_add_tail(&page->node, &chan->desc.pages); 47487244fe5SLaurent Pinchart spin_unlock_irq(&chan->lock); 47587244fe5SLaurent Pinchart 47687244fe5SLaurent Pinchart return 0; 47787244fe5SLaurent Pinchart } 47887244fe5SLaurent Pinchart 47987244fe5SLaurent Pinchart /* 48087244fe5SLaurent Pinchart * rcar_dmac_desc_put - Release a DMA transfer descriptor 48187244fe5SLaurent Pinchart * @chan: the DMA channel 48287244fe5SLaurent Pinchart * @desc: the descriptor 48387244fe5SLaurent Pinchart * 48487244fe5SLaurent Pinchart * Put the descriptor and its transfer chunk descriptors back in the channel's 4851ed1315fSLaurent Pinchart * free descriptors lists. The descriptor's chunks list will be reinitialized to 4861ed1315fSLaurent Pinchart * an empty list as a result. 48787244fe5SLaurent Pinchart * 488ccadee9bSLaurent Pinchart * The descriptor must have been removed from the channel's lists before calling 489ccadee9bSLaurent Pinchart * this function. 49087244fe5SLaurent Pinchart */ 49187244fe5SLaurent Pinchart static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan, 49287244fe5SLaurent Pinchart struct rcar_dmac_desc *desc) 49387244fe5SLaurent Pinchart { 494f3915072SLaurent Pinchart unsigned long flags; 495f3915072SLaurent Pinchart 496f3915072SLaurent Pinchart spin_lock_irqsave(&chan->lock, flags); 49787244fe5SLaurent Pinchart list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); 49887244fe5SLaurent Pinchart list_add_tail(&desc->node, &chan->desc.free); 499f3915072SLaurent Pinchart spin_unlock_irqrestore(&chan->lock, flags); 50087244fe5SLaurent Pinchart } 50187244fe5SLaurent Pinchart 50287244fe5SLaurent Pinchart static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan) 50387244fe5SLaurent Pinchart { 50487244fe5SLaurent Pinchart struct rcar_dmac_desc *desc, *_desc; 505ccadee9bSLaurent Pinchart LIST_HEAD(list); 50687244fe5SLaurent Pinchart 507ccadee9bSLaurent Pinchart /* 508ccadee9bSLaurent Pinchart * We have to temporarily move all descriptors from the wait list to a 509ccadee9bSLaurent Pinchart * local list as iterating over the wait list, even with 510ccadee9bSLaurent Pinchart * list_for_each_entry_safe, isn't safe if we release the channel lock 511ccadee9bSLaurent Pinchart * around the rcar_dmac_desc_put() call. 512ccadee9bSLaurent Pinchart */ 513ccadee9bSLaurent Pinchart spin_lock_irq(&chan->lock); 514ccadee9bSLaurent Pinchart list_splice_init(&chan->desc.wait, &list); 515ccadee9bSLaurent Pinchart spin_unlock_irq(&chan->lock); 516ccadee9bSLaurent Pinchart 517ccadee9bSLaurent Pinchart list_for_each_entry_safe(desc, _desc, &list, node) { 51887244fe5SLaurent Pinchart if (async_tx_test_ack(&desc->async_tx)) { 51987244fe5SLaurent Pinchart list_del(&desc->node); 52087244fe5SLaurent Pinchart rcar_dmac_desc_put(chan, desc); 52187244fe5SLaurent Pinchart } 52287244fe5SLaurent Pinchart } 523ccadee9bSLaurent Pinchart 524ccadee9bSLaurent Pinchart if (list_empty(&list)) 525ccadee9bSLaurent Pinchart return; 526ccadee9bSLaurent Pinchart 527ccadee9bSLaurent Pinchart /* Put the remaining descriptors back in the wait list. */ 528ccadee9bSLaurent Pinchart spin_lock_irq(&chan->lock); 529ccadee9bSLaurent Pinchart list_splice(&list, &chan->desc.wait); 530ccadee9bSLaurent Pinchart spin_unlock_irq(&chan->lock); 53187244fe5SLaurent Pinchart } 53287244fe5SLaurent Pinchart 53387244fe5SLaurent Pinchart /* 53487244fe5SLaurent Pinchart * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer 53587244fe5SLaurent Pinchart * @chan: the DMA channel 53687244fe5SLaurent Pinchart * 53787244fe5SLaurent Pinchart * Locking: This function must be called in a non-atomic context. 53887244fe5SLaurent Pinchart * 53987244fe5SLaurent Pinchart * Return: A pointer to the allocated descriptor or NULL if no descriptor can 54087244fe5SLaurent Pinchart * be allocated. 54187244fe5SLaurent Pinchart */ 54287244fe5SLaurent Pinchart static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan) 54387244fe5SLaurent Pinchart { 54487244fe5SLaurent Pinchart struct rcar_dmac_desc *desc; 54587244fe5SLaurent Pinchart int ret; 54687244fe5SLaurent Pinchart 54787244fe5SLaurent Pinchart /* Recycle acked descriptors before attempting allocation. */ 54887244fe5SLaurent Pinchart rcar_dmac_desc_recycle_acked(chan); 54987244fe5SLaurent Pinchart 550ccadee9bSLaurent Pinchart spin_lock_irq(&chan->lock); 551ccadee9bSLaurent Pinchart 552a55e07c8SLaurent Pinchart while (list_empty(&chan->desc.free)) { 55387244fe5SLaurent Pinchart /* 554a55e07c8SLaurent Pinchart * No free descriptors, allocate a page worth of them and try 555a55e07c8SLaurent Pinchart * again, as someone else could race us to get the newly 556a55e07c8SLaurent Pinchart * allocated descriptors. If the allocation fails return an 557a55e07c8SLaurent Pinchart * error. 55887244fe5SLaurent Pinchart */ 55987244fe5SLaurent Pinchart spin_unlock_irq(&chan->lock); 56087244fe5SLaurent Pinchart ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT); 56187244fe5SLaurent Pinchart if (ret < 0) 56287244fe5SLaurent Pinchart return NULL; 56387244fe5SLaurent Pinchart spin_lock_irq(&chan->lock); 56487244fe5SLaurent Pinchart } 56587244fe5SLaurent Pinchart 566a55e07c8SLaurent Pinchart desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node); 56787244fe5SLaurent Pinchart list_del(&desc->node); 56887244fe5SLaurent Pinchart 56987244fe5SLaurent Pinchart spin_unlock_irq(&chan->lock); 57087244fe5SLaurent Pinchart 57187244fe5SLaurent Pinchart return desc; 57287244fe5SLaurent Pinchart } 57387244fe5SLaurent Pinchart 57487244fe5SLaurent Pinchart /* 57587244fe5SLaurent Pinchart * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks 57687244fe5SLaurent Pinchart * @chan: the DMA channel 57787244fe5SLaurent Pinchart * @gfp: allocation flags 57887244fe5SLaurent Pinchart */ 57987244fe5SLaurent Pinchart static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) 58087244fe5SLaurent Pinchart { 58187244fe5SLaurent Pinchart struct rcar_dmac_desc_page *page; 58287244fe5SLaurent Pinchart LIST_HEAD(list); 58387244fe5SLaurent Pinchart unsigned int i; 58487244fe5SLaurent Pinchart 58587244fe5SLaurent Pinchart page = (void *)get_zeroed_page(gfp); 58687244fe5SLaurent Pinchart if (!page) 58787244fe5SLaurent Pinchart return -ENOMEM; 58887244fe5SLaurent Pinchart 58987244fe5SLaurent Pinchart for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) { 59087244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; 59187244fe5SLaurent Pinchart 59287244fe5SLaurent Pinchart list_add_tail(&chunk->node, &list); 59387244fe5SLaurent Pinchart } 59487244fe5SLaurent Pinchart 59587244fe5SLaurent Pinchart spin_lock_irq(&chan->lock); 59687244fe5SLaurent Pinchart list_splice_tail(&list, &chan->desc.chunks_free); 59787244fe5SLaurent Pinchart list_add_tail(&page->node, &chan->desc.pages); 59887244fe5SLaurent Pinchart spin_unlock_irq(&chan->lock); 59987244fe5SLaurent Pinchart 60087244fe5SLaurent Pinchart return 0; 60187244fe5SLaurent Pinchart } 60287244fe5SLaurent Pinchart 60387244fe5SLaurent Pinchart /* 60487244fe5SLaurent Pinchart * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer 60587244fe5SLaurent Pinchart * @chan: the DMA channel 60687244fe5SLaurent Pinchart * 60787244fe5SLaurent Pinchart * Locking: This function must be called in a non-atomic context. 60887244fe5SLaurent Pinchart * 60987244fe5SLaurent Pinchart * Return: A pointer to the allocated transfer chunk descriptor or NULL if no 61087244fe5SLaurent Pinchart * descriptor can be allocated. 61187244fe5SLaurent Pinchart */ 61287244fe5SLaurent Pinchart static struct rcar_dmac_xfer_chunk * 61387244fe5SLaurent Pinchart rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan) 61487244fe5SLaurent Pinchart { 61587244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk *chunk; 61687244fe5SLaurent Pinchart int ret; 61787244fe5SLaurent Pinchart 61887244fe5SLaurent Pinchart spin_lock_irq(&chan->lock); 61987244fe5SLaurent Pinchart 620a55e07c8SLaurent Pinchart while (list_empty(&chan->desc.chunks_free)) { 62187244fe5SLaurent Pinchart /* 622a55e07c8SLaurent Pinchart * No free descriptors, allocate a page worth of them and try 623a55e07c8SLaurent Pinchart * again, as someone else could race us to get the newly 624a55e07c8SLaurent Pinchart * allocated descriptors. If the allocation fails return an 625a55e07c8SLaurent Pinchart * error. 62687244fe5SLaurent Pinchart */ 62787244fe5SLaurent Pinchart spin_unlock_irq(&chan->lock); 62887244fe5SLaurent Pinchart ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT); 62987244fe5SLaurent Pinchart if (ret < 0) 63087244fe5SLaurent Pinchart return NULL; 63187244fe5SLaurent Pinchart spin_lock_irq(&chan->lock); 63287244fe5SLaurent Pinchart } 63387244fe5SLaurent Pinchart 63487244fe5SLaurent Pinchart chunk = list_first_entry(&chan->desc.chunks_free, 63587244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk, node); 63687244fe5SLaurent Pinchart list_del(&chunk->node); 63787244fe5SLaurent Pinchart 63887244fe5SLaurent Pinchart spin_unlock_irq(&chan->lock); 63987244fe5SLaurent Pinchart 64087244fe5SLaurent Pinchart return chunk; 64187244fe5SLaurent Pinchart } 64287244fe5SLaurent Pinchart 6431ed1315fSLaurent Pinchart static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan, 6441ed1315fSLaurent Pinchart struct rcar_dmac_desc *desc, size_t size) 6451ed1315fSLaurent Pinchart { 6461ed1315fSLaurent Pinchart /* 6471ed1315fSLaurent Pinchart * dma_alloc_coherent() allocates memory in page size increments. To 6481ed1315fSLaurent Pinchart * avoid reallocating the hardware descriptors when the allocated size 6491ed1315fSLaurent Pinchart * wouldn't change align the requested size to a multiple of the page 6501ed1315fSLaurent Pinchart * size. 6511ed1315fSLaurent Pinchart */ 6521ed1315fSLaurent Pinchart size = PAGE_ALIGN(size); 6531ed1315fSLaurent Pinchart 6541ed1315fSLaurent Pinchart if (desc->hwdescs.size == size) 6551ed1315fSLaurent Pinchart return; 6561ed1315fSLaurent Pinchart 6571ed1315fSLaurent Pinchart if (desc->hwdescs.mem) { 6581ed1315fSLaurent Pinchart dma_free_coherent(NULL, desc->hwdescs.size, desc->hwdescs.mem, 6591ed1315fSLaurent Pinchart desc->hwdescs.dma); 6601ed1315fSLaurent Pinchart desc->hwdescs.mem = NULL; 6611ed1315fSLaurent Pinchart desc->hwdescs.size = 0; 6621ed1315fSLaurent Pinchart } 6631ed1315fSLaurent Pinchart 6641ed1315fSLaurent Pinchart if (!size) 6651ed1315fSLaurent Pinchart return; 6661ed1315fSLaurent Pinchart 6671ed1315fSLaurent Pinchart desc->hwdescs.mem = dma_alloc_coherent(NULL, size, &desc->hwdescs.dma, 6681ed1315fSLaurent Pinchart GFP_NOWAIT); 6691ed1315fSLaurent Pinchart if (!desc->hwdescs.mem) 6701ed1315fSLaurent Pinchart return; 6711ed1315fSLaurent Pinchart 6721ed1315fSLaurent Pinchart desc->hwdescs.size = size; 6731ed1315fSLaurent Pinchart } 6741ed1315fSLaurent Pinchart 675ee4b876bSJürg Billeter static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, 676ccadee9bSLaurent Pinchart struct rcar_dmac_desc *desc) 677ccadee9bSLaurent Pinchart { 678ccadee9bSLaurent Pinchart struct rcar_dmac_xfer_chunk *chunk; 679ccadee9bSLaurent Pinchart struct rcar_dmac_hw_desc *hwdesc; 680ccadee9bSLaurent Pinchart 6811ed1315fSLaurent Pinchart rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc)); 6821ed1315fSLaurent Pinchart 6831ed1315fSLaurent Pinchart hwdesc = desc->hwdescs.mem; 684ccadee9bSLaurent Pinchart if (!hwdesc) 685ee4b876bSJürg Billeter return -ENOMEM; 686ccadee9bSLaurent Pinchart 687ccadee9bSLaurent Pinchart list_for_each_entry(chunk, &desc->chunks, node) { 688ccadee9bSLaurent Pinchart hwdesc->sar = chunk->src_addr; 689ccadee9bSLaurent Pinchart hwdesc->dar = chunk->dst_addr; 690ccadee9bSLaurent Pinchart hwdesc->tcr = chunk->size >> desc->xfer_shift; 691ccadee9bSLaurent Pinchart hwdesc++; 692ccadee9bSLaurent Pinchart } 693ee4b876bSJürg Billeter 694ee4b876bSJürg Billeter return 0; 695ccadee9bSLaurent Pinchart } 696ccadee9bSLaurent Pinchart 69787244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 69887244fe5SLaurent Pinchart * Stop and reset 69987244fe5SLaurent Pinchart */ 70087244fe5SLaurent Pinchart 70187244fe5SLaurent Pinchart static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan) 70287244fe5SLaurent Pinchart { 70387244fe5SLaurent Pinchart u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 70487244fe5SLaurent Pinchart 705ccadee9bSLaurent Pinchart chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE | 706ccadee9bSLaurent Pinchart RCAR_DMACHCR_TE | RCAR_DMACHCR_DE); 70787244fe5SLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); 70887244fe5SLaurent Pinchart } 70987244fe5SLaurent Pinchart 71087244fe5SLaurent Pinchart static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan) 71187244fe5SLaurent Pinchart { 71287244fe5SLaurent Pinchart struct rcar_dmac_desc *desc, *_desc; 71387244fe5SLaurent Pinchart unsigned long flags; 71487244fe5SLaurent Pinchart LIST_HEAD(descs); 71587244fe5SLaurent Pinchart 71687244fe5SLaurent Pinchart spin_lock_irqsave(&chan->lock, flags); 71787244fe5SLaurent Pinchart 71887244fe5SLaurent Pinchart /* Move all non-free descriptors to the local lists. */ 71987244fe5SLaurent Pinchart list_splice_init(&chan->desc.pending, &descs); 72087244fe5SLaurent Pinchart list_splice_init(&chan->desc.active, &descs); 72187244fe5SLaurent Pinchart list_splice_init(&chan->desc.done, &descs); 72287244fe5SLaurent Pinchart list_splice_init(&chan->desc.wait, &descs); 72387244fe5SLaurent Pinchart 72487244fe5SLaurent Pinchart chan->desc.running = NULL; 72587244fe5SLaurent Pinchart 72687244fe5SLaurent Pinchart spin_unlock_irqrestore(&chan->lock, flags); 72787244fe5SLaurent Pinchart 72887244fe5SLaurent Pinchart list_for_each_entry_safe(desc, _desc, &descs, node) { 72987244fe5SLaurent Pinchart list_del(&desc->node); 73087244fe5SLaurent Pinchart rcar_dmac_desc_put(chan, desc); 73187244fe5SLaurent Pinchart } 73287244fe5SLaurent Pinchart } 73387244fe5SLaurent Pinchart 73487244fe5SLaurent Pinchart static void rcar_dmac_stop(struct rcar_dmac *dmac) 73587244fe5SLaurent Pinchart { 73687244fe5SLaurent Pinchart rcar_dmac_write(dmac, RCAR_DMAOR, 0); 73787244fe5SLaurent Pinchart } 73887244fe5SLaurent Pinchart 73987244fe5SLaurent Pinchart static void rcar_dmac_abort(struct rcar_dmac *dmac) 74087244fe5SLaurent Pinchart { 74187244fe5SLaurent Pinchart unsigned int i; 74287244fe5SLaurent Pinchart 74387244fe5SLaurent Pinchart /* Stop all channels. */ 74487244fe5SLaurent Pinchart for (i = 0; i < dmac->n_channels; ++i) { 74587244fe5SLaurent Pinchart struct rcar_dmac_chan *chan = &dmac->channels[i]; 74687244fe5SLaurent Pinchart 74787244fe5SLaurent Pinchart /* Stop and reinitialize the channel. */ 74887244fe5SLaurent Pinchart spin_lock(&chan->lock); 74987244fe5SLaurent Pinchart rcar_dmac_chan_halt(chan); 75087244fe5SLaurent Pinchart spin_unlock(&chan->lock); 75187244fe5SLaurent Pinchart 75287244fe5SLaurent Pinchart rcar_dmac_chan_reinit(chan); 75387244fe5SLaurent Pinchart } 75487244fe5SLaurent Pinchart } 75587244fe5SLaurent Pinchart 75687244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 75787244fe5SLaurent Pinchart * Descriptors preparation 75887244fe5SLaurent Pinchart */ 75987244fe5SLaurent Pinchart 76087244fe5SLaurent Pinchart static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan, 76187244fe5SLaurent Pinchart struct rcar_dmac_desc *desc) 76287244fe5SLaurent Pinchart { 76387244fe5SLaurent Pinchart static const u32 chcr_ts[] = { 76487244fe5SLaurent Pinchart RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B, 76587244fe5SLaurent Pinchart RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B, 76687244fe5SLaurent Pinchart RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B, 76787244fe5SLaurent Pinchart RCAR_DMACHCR_TS_64B, 76887244fe5SLaurent Pinchart }; 76987244fe5SLaurent Pinchart 77087244fe5SLaurent Pinchart unsigned int xfer_size; 77187244fe5SLaurent Pinchart u32 chcr; 77287244fe5SLaurent Pinchart 77387244fe5SLaurent Pinchart switch (desc->direction) { 77487244fe5SLaurent Pinchart case DMA_DEV_TO_MEM: 77587244fe5SLaurent Pinchart chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED 77687244fe5SLaurent Pinchart | RCAR_DMACHCR_RS_DMARS; 77787244fe5SLaurent Pinchart xfer_size = chan->src_xfer_size; 77887244fe5SLaurent Pinchart break; 77987244fe5SLaurent Pinchart 78087244fe5SLaurent Pinchart case DMA_MEM_TO_DEV: 78187244fe5SLaurent Pinchart chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC 78287244fe5SLaurent Pinchart | RCAR_DMACHCR_RS_DMARS; 78387244fe5SLaurent Pinchart xfer_size = chan->dst_xfer_size; 78487244fe5SLaurent Pinchart break; 78587244fe5SLaurent Pinchart 78687244fe5SLaurent Pinchart case DMA_MEM_TO_MEM: 78787244fe5SLaurent Pinchart default: 78887244fe5SLaurent Pinchart chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC 78987244fe5SLaurent Pinchart | RCAR_DMACHCR_RS_AUTO; 79087244fe5SLaurent Pinchart xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE; 79187244fe5SLaurent Pinchart break; 79287244fe5SLaurent Pinchart } 79387244fe5SLaurent Pinchart 79487244fe5SLaurent Pinchart desc->xfer_shift = ilog2(xfer_size); 79587244fe5SLaurent Pinchart desc->chcr = chcr | chcr_ts[desc->xfer_shift]; 79687244fe5SLaurent Pinchart } 79787244fe5SLaurent Pinchart 79887244fe5SLaurent Pinchart /* 79987244fe5SLaurent Pinchart * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list 80087244fe5SLaurent Pinchart * 80187244fe5SLaurent Pinchart * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also 80287244fe5SLaurent Pinchart * converted to scatter-gather to guarantee consistent locking and a correct 80387244fe5SLaurent Pinchart * list manipulation. For slave DMA direction carries the usual meaning, and, 80487244fe5SLaurent Pinchart * logically, the SG list is RAM and the addr variable contains slave address, 80587244fe5SLaurent Pinchart * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM 80687244fe5SLaurent Pinchart * and the SG list contains only one element and points at the source buffer. 80787244fe5SLaurent Pinchart */ 80887244fe5SLaurent Pinchart static struct dma_async_tx_descriptor * 80987244fe5SLaurent Pinchart rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, 81087244fe5SLaurent Pinchart unsigned int sg_len, dma_addr_t dev_addr, 81187244fe5SLaurent Pinchart enum dma_transfer_direction dir, unsigned long dma_flags, 81287244fe5SLaurent Pinchart bool cyclic) 81387244fe5SLaurent Pinchart { 81487244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk *chunk; 81587244fe5SLaurent Pinchart struct rcar_dmac_desc *desc; 81687244fe5SLaurent Pinchart struct scatterlist *sg; 817ccadee9bSLaurent Pinchart unsigned int nchunks = 0; 81887244fe5SLaurent Pinchart unsigned int max_chunk_size; 81987244fe5SLaurent Pinchart unsigned int full_size = 0; 820ccadee9bSLaurent Pinchart bool highmem = false; 82187244fe5SLaurent Pinchart unsigned int i; 82287244fe5SLaurent Pinchart 82387244fe5SLaurent Pinchart desc = rcar_dmac_desc_get(chan); 82487244fe5SLaurent Pinchart if (!desc) 82587244fe5SLaurent Pinchart return NULL; 82687244fe5SLaurent Pinchart 82787244fe5SLaurent Pinchart desc->async_tx.flags = dma_flags; 82887244fe5SLaurent Pinchart desc->async_tx.cookie = -EBUSY; 82987244fe5SLaurent Pinchart 83087244fe5SLaurent Pinchart desc->cyclic = cyclic; 83187244fe5SLaurent Pinchart desc->direction = dir; 83287244fe5SLaurent Pinchart 83387244fe5SLaurent Pinchart rcar_dmac_chan_configure_desc(chan, desc); 83487244fe5SLaurent Pinchart 83587244fe5SLaurent Pinchart max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; 83687244fe5SLaurent Pinchart 83787244fe5SLaurent Pinchart /* 83887244fe5SLaurent Pinchart * Allocate and fill the transfer chunk descriptors. We own the only 83987244fe5SLaurent Pinchart * reference to the DMA descriptor, there's no need for locking. 84087244fe5SLaurent Pinchart */ 84187244fe5SLaurent Pinchart for_each_sg(sgl, sg, sg_len, i) { 84287244fe5SLaurent Pinchart dma_addr_t mem_addr = sg_dma_address(sg); 84387244fe5SLaurent Pinchart unsigned int len = sg_dma_len(sg); 84487244fe5SLaurent Pinchart 84587244fe5SLaurent Pinchart full_size += len; 84687244fe5SLaurent Pinchart 84787244fe5SLaurent Pinchart while (len) { 84887244fe5SLaurent Pinchart unsigned int size = min(len, max_chunk_size); 84987244fe5SLaurent Pinchart 85087244fe5SLaurent Pinchart #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 85187244fe5SLaurent Pinchart /* 85287244fe5SLaurent Pinchart * Prevent individual transfers from crossing 4GB 85387244fe5SLaurent Pinchart * boundaries. 85487244fe5SLaurent Pinchart */ 85587244fe5SLaurent Pinchart if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) 85687244fe5SLaurent Pinchart size = ALIGN(dev_addr, 1ULL << 32) - dev_addr; 85787244fe5SLaurent Pinchart if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) 85887244fe5SLaurent Pinchart size = ALIGN(mem_addr, 1ULL << 32) - mem_addr; 859ccadee9bSLaurent Pinchart 860ccadee9bSLaurent Pinchart /* 861ccadee9bSLaurent Pinchart * Check if either of the source or destination address 862ccadee9bSLaurent Pinchart * can't be expressed in 32 bits. If so we can't use 863ccadee9bSLaurent Pinchart * hardware descriptor lists. 864ccadee9bSLaurent Pinchart */ 865ccadee9bSLaurent Pinchart if (dev_addr >> 32 || mem_addr >> 32) 866ccadee9bSLaurent Pinchart highmem = true; 86787244fe5SLaurent Pinchart #endif 86887244fe5SLaurent Pinchart 86987244fe5SLaurent Pinchart chunk = rcar_dmac_xfer_chunk_get(chan); 87087244fe5SLaurent Pinchart if (!chunk) { 87187244fe5SLaurent Pinchart rcar_dmac_desc_put(chan, desc); 87287244fe5SLaurent Pinchart return NULL; 87387244fe5SLaurent Pinchart } 87487244fe5SLaurent Pinchart 87587244fe5SLaurent Pinchart if (dir == DMA_DEV_TO_MEM) { 87687244fe5SLaurent Pinchart chunk->src_addr = dev_addr; 87787244fe5SLaurent Pinchart chunk->dst_addr = mem_addr; 87887244fe5SLaurent Pinchart } else { 87987244fe5SLaurent Pinchart chunk->src_addr = mem_addr; 88087244fe5SLaurent Pinchart chunk->dst_addr = dev_addr; 88187244fe5SLaurent Pinchart } 88287244fe5SLaurent Pinchart 88387244fe5SLaurent Pinchart chunk->size = size; 88487244fe5SLaurent Pinchart 88587244fe5SLaurent Pinchart dev_dbg(chan->chan.device->dev, 88687244fe5SLaurent Pinchart "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n", 88787244fe5SLaurent Pinchart chan->index, chunk, desc, i, sg, size, len, 88887244fe5SLaurent Pinchart &chunk->src_addr, &chunk->dst_addr); 88987244fe5SLaurent Pinchart 89087244fe5SLaurent Pinchart mem_addr += size; 89187244fe5SLaurent Pinchart if (dir == DMA_MEM_TO_MEM) 89287244fe5SLaurent Pinchart dev_addr += size; 89387244fe5SLaurent Pinchart 89487244fe5SLaurent Pinchart len -= size; 89587244fe5SLaurent Pinchart 89687244fe5SLaurent Pinchart list_add_tail(&chunk->node, &desc->chunks); 897ccadee9bSLaurent Pinchart nchunks++; 89887244fe5SLaurent Pinchart } 89987244fe5SLaurent Pinchart } 90087244fe5SLaurent Pinchart 901ccadee9bSLaurent Pinchart desc->nchunks = nchunks; 90287244fe5SLaurent Pinchart desc->size = full_size; 90387244fe5SLaurent Pinchart 904ccadee9bSLaurent Pinchart /* 905ccadee9bSLaurent Pinchart * Use hardware descriptor lists if possible when more than one chunk 906ccadee9bSLaurent Pinchart * needs to be transferred (otherwise they don't make much sense). 907ccadee9bSLaurent Pinchart * 908ccadee9bSLaurent Pinchart * The highmem check currently covers the whole transfer. As an 909ccadee9bSLaurent Pinchart * optimization we could use descriptor lists for consecutive lowmem 910ccadee9bSLaurent Pinchart * chunks and direct manual mode for highmem chunks. Whether the 911ccadee9bSLaurent Pinchart * performance improvement would be significant enough compared to the 912ccadee9bSLaurent Pinchart * additional complexity remains to be investigated. 913ccadee9bSLaurent Pinchart */ 9141ed1315fSLaurent Pinchart desc->hwdescs.use = !highmem && nchunks > 1; 915ee4b876bSJürg Billeter if (desc->hwdescs.use) { 916ee4b876bSJürg Billeter if (rcar_dmac_fill_hwdesc(chan, desc) < 0) 917ee4b876bSJürg Billeter desc->hwdescs.use = false; 918ee4b876bSJürg Billeter } 919ccadee9bSLaurent Pinchart 92087244fe5SLaurent Pinchart return &desc->async_tx; 92187244fe5SLaurent Pinchart } 92287244fe5SLaurent Pinchart 92387244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 92487244fe5SLaurent Pinchart * DMA engine operations 92587244fe5SLaurent Pinchart */ 92687244fe5SLaurent Pinchart 92787244fe5SLaurent Pinchart static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan) 92887244fe5SLaurent Pinchart { 92987244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 93087244fe5SLaurent Pinchart int ret; 93187244fe5SLaurent Pinchart 93287244fe5SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.chunks_free); 93387244fe5SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.pages); 93487244fe5SLaurent Pinchart 93587244fe5SLaurent Pinchart /* Preallocate descriptors. */ 93687244fe5SLaurent Pinchart ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL); 93787244fe5SLaurent Pinchart if (ret < 0) 93887244fe5SLaurent Pinchart return -ENOMEM; 93987244fe5SLaurent Pinchart 94087244fe5SLaurent Pinchart ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL); 94187244fe5SLaurent Pinchart if (ret < 0) 94287244fe5SLaurent Pinchart return -ENOMEM; 94387244fe5SLaurent Pinchart 94487244fe5SLaurent Pinchart return pm_runtime_get_sync(chan->device->dev); 94587244fe5SLaurent Pinchart } 94687244fe5SLaurent Pinchart 94787244fe5SLaurent Pinchart static void rcar_dmac_free_chan_resources(struct dma_chan *chan) 94887244fe5SLaurent Pinchart { 94987244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 95087244fe5SLaurent Pinchart struct rcar_dmac *dmac = to_rcar_dmac(chan->device); 95187244fe5SLaurent Pinchart struct rcar_dmac_desc_page *page, *_page; 9521ed1315fSLaurent Pinchart struct rcar_dmac_desc *desc; 9531ed1315fSLaurent Pinchart LIST_HEAD(list); 95487244fe5SLaurent Pinchart 95587244fe5SLaurent Pinchart /* Protect against ISR */ 95687244fe5SLaurent Pinchart spin_lock_irq(&rchan->lock); 95787244fe5SLaurent Pinchart rcar_dmac_chan_halt(rchan); 95887244fe5SLaurent Pinchart spin_unlock_irq(&rchan->lock); 95987244fe5SLaurent Pinchart 96087244fe5SLaurent Pinchart /* Now no new interrupts will occur */ 96187244fe5SLaurent Pinchart 96287244fe5SLaurent Pinchart if (rchan->mid_rid >= 0) { 96387244fe5SLaurent Pinchart /* The caller is holding dma_list_mutex */ 96487244fe5SLaurent Pinchart clear_bit(rchan->mid_rid, dmac->modules); 96587244fe5SLaurent Pinchart rchan->mid_rid = -EINVAL; 96687244fe5SLaurent Pinchart } 96787244fe5SLaurent Pinchart 968f7638c90SLaurent Pinchart list_splice_init(&rchan->desc.free, &list); 969f7638c90SLaurent Pinchart list_splice_init(&rchan->desc.pending, &list); 970f7638c90SLaurent Pinchart list_splice_init(&rchan->desc.active, &list); 971f7638c90SLaurent Pinchart list_splice_init(&rchan->desc.done, &list); 972f7638c90SLaurent Pinchart list_splice_init(&rchan->desc.wait, &list); 9731ed1315fSLaurent Pinchart 9741ed1315fSLaurent Pinchart list_for_each_entry(desc, &list, node) 9751ed1315fSLaurent Pinchart rcar_dmac_realloc_hwdesc(rchan, desc, 0); 9761ed1315fSLaurent Pinchart 97787244fe5SLaurent Pinchart list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) { 97887244fe5SLaurent Pinchart list_del(&page->node); 97987244fe5SLaurent Pinchart free_page((unsigned long)page); 98087244fe5SLaurent Pinchart } 98187244fe5SLaurent Pinchart 98287244fe5SLaurent Pinchart pm_runtime_put(chan->device->dev); 98387244fe5SLaurent Pinchart } 98487244fe5SLaurent Pinchart 98587244fe5SLaurent Pinchart static struct dma_async_tx_descriptor * 98687244fe5SLaurent Pinchart rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, 98787244fe5SLaurent Pinchart dma_addr_t dma_src, size_t len, unsigned long flags) 98887244fe5SLaurent Pinchart { 98987244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 99087244fe5SLaurent Pinchart struct scatterlist sgl; 99187244fe5SLaurent Pinchart 99287244fe5SLaurent Pinchart if (!len) 99387244fe5SLaurent Pinchart return NULL; 99487244fe5SLaurent Pinchart 99587244fe5SLaurent Pinchart sg_init_table(&sgl, 1); 99687244fe5SLaurent Pinchart sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len, 99787244fe5SLaurent Pinchart offset_in_page(dma_src)); 99887244fe5SLaurent Pinchart sg_dma_address(&sgl) = dma_src; 99987244fe5SLaurent Pinchart sg_dma_len(&sgl) = len; 100087244fe5SLaurent Pinchart 100187244fe5SLaurent Pinchart return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest, 100287244fe5SLaurent Pinchart DMA_MEM_TO_MEM, flags, false); 100387244fe5SLaurent Pinchart } 100487244fe5SLaurent Pinchart 100587244fe5SLaurent Pinchart static struct dma_async_tx_descriptor * 100687244fe5SLaurent Pinchart rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 100787244fe5SLaurent Pinchart unsigned int sg_len, enum dma_transfer_direction dir, 100887244fe5SLaurent Pinchart unsigned long flags, void *context) 100987244fe5SLaurent Pinchart { 101087244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 101187244fe5SLaurent Pinchart dma_addr_t dev_addr; 101287244fe5SLaurent Pinchart 101387244fe5SLaurent Pinchart /* Someone calling slave DMA on a generic channel? */ 101487244fe5SLaurent Pinchart if (rchan->mid_rid < 0 || !sg_len) { 101587244fe5SLaurent Pinchart dev_warn(chan->device->dev, 101687244fe5SLaurent Pinchart "%s: bad parameter: len=%d, id=%d\n", 101787244fe5SLaurent Pinchart __func__, sg_len, rchan->mid_rid); 101887244fe5SLaurent Pinchart return NULL; 101987244fe5SLaurent Pinchart } 102087244fe5SLaurent Pinchart 102187244fe5SLaurent Pinchart dev_addr = dir == DMA_DEV_TO_MEM 102287244fe5SLaurent Pinchart ? rchan->src_slave_addr : rchan->dst_slave_addr; 102387244fe5SLaurent Pinchart return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr, 102487244fe5SLaurent Pinchart dir, flags, false); 102587244fe5SLaurent Pinchart } 102687244fe5SLaurent Pinchart 102787244fe5SLaurent Pinchart #define RCAR_DMAC_MAX_SG_LEN 32 102887244fe5SLaurent Pinchart 102987244fe5SLaurent Pinchart static struct dma_async_tx_descriptor * 103087244fe5SLaurent Pinchart rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, 103187244fe5SLaurent Pinchart size_t buf_len, size_t period_len, 103287244fe5SLaurent Pinchart enum dma_transfer_direction dir, unsigned long flags) 103387244fe5SLaurent Pinchart { 103487244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 103587244fe5SLaurent Pinchart struct dma_async_tx_descriptor *desc; 103687244fe5SLaurent Pinchart struct scatterlist *sgl; 103787244fe5SLaurent Pinchart dma_addr_t dev_addr; 103887244fe5SLaurent Pinchart unsigned int sg_len; 103987244fe5SLaurent Pinchart unsigned int i; 104087244fe5SLaurent Pinchart 104187244fe5SLaurent Pinchart /* Someone calling slave DMA on a generic channel? */ 104287244fe5SLaurent Pinchart if (rchan->mid_rid < 0 || buf_len < period_len) { 104387244fe5SLaurent Pinchart dev_warn(chan->device->dev, 104487244fe5SLaurent Pinchart "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n", 104587244fe5SLaurent Pinchart __func__, buf_len, period_len, rchan->mid_rid); 104687244fe5SLaurent Pinchart return NULL; 104787244fe5SLaurent Pinchart } 104887244fe5SLaurent Pinchart 104987244fe5SLaurent Pinchart sg_len = buf_len / period_len; 105087244fe5SLaurent Pinchart if (sg_len > RCAR_DMAC_MAX_SG_LEN) { 105187244fe5SLaurent Pinchart dev_err(chan->device->dev, 105287244fe5SLaurent Pinchart "chan%u: sg length %d exceds limit %d", 105387244fe5SLaurent Pinchart rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN); 105487244fe5SLaurent Pinchart return NULL; 105587244fe5SLaurent Pinchart } 105687244fe5SLaurent Pinchart 105787244fe5SLaurent Pinchart /* 105887244fe5SLaurent Pinchart * Allocate the sg list dynamically as it would consume too much stack 105987244fe5SLaurent Pinchart * space. 106087244fe5SLaurent Pinchart */ 106187244fe5SLaurent Pinchart sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT); 106287244fe5SLaurent Pinchart if (!sgl) 106387244fe5SLaurent Pinchart return NULL; 106487244fe5SLaurent Pinchart 106587244fe5SLaurent Pinchart sg_init_table(sgl, sg_len); 106687244fe5SLaurent Pinchart 106787244fe5SLaurent Pinchart for (i = 0; i < sg_len; ++i) { 106887244fe5SLaurent Pinchart dma_addr_t src = buf_addr + (period_len * i); 106987244fe5SLaurent Pinchart 107087244fe5SLaurent Pinchart sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len, 107187244fe5SLaurent Pinchart offset_in_page(src)); 107287244fe5SLaurent Pinchart sg_dma_address(&sgl[i]) = src; 107387244fe5SLaurent Pinchart sg_dma_len(&sgl[i]) = period_len; 107487244fe5SLaurent Pinchart } 107587244fe5SLaurent Pinchart 107687244fe5SLaurent Pinchart dev_addr = dir == DMA_DEV_TO_MEM 107787244fe5SLaurent Pinchart ? rchan->src_slave_addr : rchan->dst_slave_addr; 107887244fe5SLaurent Pinchart desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr, 107987244fe5SLaurent Pinchart dir, flags, true); 108087244fe5SLaurent Pinchart 108187244fe5SLaurent Pinchart kfree(sgl); 108287244fe5SLaurent Pinchart return desc; 108387244fe5SLaurent Pinchart } 108487244fe5SLaurent Pinchart 108587244fe5SLaurent Pinchart static int rcar_dmac_device_config(struct dma_chan *chan, 108687244fe5SLaurent Pinchart struct dma_slave_config *cfg) 108787244fe5SLaurent Pinchart { 108887244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 108987244fe5SLaurent Pinchart 109087244fe5SLaurent Pinchart /* 109187244fe5SLaurent Pinchart * We could lock this, but you shouldn't be configuring the 109287244fe5SLaurent Pinchart * channel, while using it... 109387244fe5SLaurent Pinchart */ 109487244fe5SLaurent Pinchart rchan->src_slave_addr = cfg->src_addr; 109587244fe5SLaurent Pinchart rchan->dst_slave_addr = cfg->dst_addr; 109687244fe5SLaurent Pinchart rchan->src_xfer_size = cfg->src_addr_width; 109787244fe5SLaurent Pinchart rchan->dst_xfer_size = cfg->dst_addr_width; 109887244fe5SLaurent Pinchart 109987244fe5SLaurent Pinchart return 0; 110087244fe5SLaurent Pinchart } 110187244fe5SLaurent Pinchart 110287244fe5SLaurent Pinchart static int rcar_dmac_chan_terminate_all(struct dma_chan *chan) 110387244fe5SLaurent Pinchart { 110487244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 110587244fe5SLaurent Pinchart unsigned long flags; 110687244fe5SLaurent Pinchart 110787244fe5SLaurent Pinchart spin_lock_irqsave(&rchan->lock, flags); 110887244fe5SLaurent Pinchart rcar_dmac_chan_halt(rchan); 110987244fe5SLaurent Pinchart spin_unlock_irqrestore(&rchan->lock, flags); 111087244fe5SLaurent Pinchart 111187244fe5SLaurent Pinchart /* 111287244fe5SLaurent Pinchart * FIXME: No new interrupt can occur now, but the IRQ thread might still 111387244fe5SLaurent Pinchart * be running. 111487244fe5SLaurent Pinchart */ 111587244fe5SLaurent Pinchart 111687244fe5SLaurent Pinchart rcar_dmac_chan_reinit(rchan); 111787244fe5SLaurent Pinchart 111887244fe5SLaurent Pinchart return 0; 111987244fe5SLaurent Pinchart } 112087244fe5SLaurent Pinchart 112187244fe5SLaurent Pinchart static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, 112287244fe5SLaurent Pinchart dma_cookie_t cookie) 112387244fe5SLaurent Pinchart { 112487244fe5SLaurent Pinchart struct rcar_dmac_desc *desc = chan->desc.running; 1125ccadee9bSLaurent Pinchart struct rcar_dmac_xfer_chunk *running = NULL; 112687244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk *chunk; 112787244fe5SLaurent Pinchart unsigned int residue = 0; 1128ccadee9bSLaurent Pinchart unsigned int dptr = 0; 112987244fe5SLaurent Pinchart 113087244fe5SLaurent Pinchart if (!desc) 113187244fe5SLaurent Pinchart return 0; 113287244fe5SLaurent Pinchart 113387244fe5SLaurent Pinchart /* 113487244fe5SLaurent Pinchart * If the cookie doesn't correspond to the currently running transfer 113587244fe5SLaurent Pinchart * then the descriptor hasn't been processed yet, and the residue is 113687244fe5SLaurent Pinchart * equal to the full descriptor size. 113787244fe5SLaurent Pinchart */ 113887244fe5SLaurent Pinchart if (cookie != desc->async_tx.cookie) 113987244fe5SLaurent Pinchart return desc->size; 114087244fe5SLaurent Pinchart 1141ccadee9bSLaurent Pinchart /* 1142ccadee9bSLaurent Pinchart * In descriptor mode the descriptor running pointer is not maintained 1143ccadee9bSLaurent Pinchart * by the interrupt handler, find the running descriptor from the 1144ccadee9bSLaurent Pinchart * descriptor pointer field in the CHCRB register. In non-descriptor 1145ccadee9bSLaurent Pinchart * mode just use the running descriptor pointer. 1146ccadee9bSLaurent Pinchart */ 11471ed1315fSLaurent Pinchart if (desc->hwdescs.use) { 1148ccadee9bSLaurent Pinchart dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & 1149ccadee9bSLaurent Pinchart RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; 1150ccadee9bSLaurent Pinchart WARN_ON(dptr >= desc->nchunks); 1151ccadee9bSLaurent Pinchart } else { 1152ccadee9bSLaurent Pinchart running = desc->running; 1153ccadee9bSLaurent Pinchart } 1154ccadee9bSLaurent Pinchart 115587244fe5SLaurent Pinchart /* Compute the size of all chunks still to be transferred. */ 115687244fe5SLaurent Pinchart list_for_each_entry_reverse(chunk, &desc->chunks, node) { 1157ccadee9bSLaurent Pinchart if (chunk == running || ++dptr == desc->nchunks) 115887244fe5SLaurent Pinchart break; 115987244fe5SLaurent Pinchart 116087244fe5SLaurent Pinchart residue += chunk->size; 116187244fe5SLaurent Pinchart } 116287244fe5SLaurent Pinchart 116387244fe5SLaurent Pinchart /* Add the residue for the current chunk. */ 116487244fe5SLaurent Pinchart residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift; 116587244fe5SLaurent Pinchart 116687244fe5SLaurent Pinchart return residue; 116787244fe5SLaurent Pinchart } 116887244fe5SLaurent Pinchart 116987244fe5SLaurent Pinchart static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, 117087244fe5SLaurent Pinchart dma_cookie_t cookie, 117187244fe5SLaurent Pinchart struct dma_tx_state *txstate) 117287244fe5SLaurent Pinchart { 117387244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 117487244fe5SLaurent Pinchart enum dma_status status; 117587244fe5SLaurent Pinchart unsigned long flags; 117687244fe5SLaurent Pinchart unsigned int residue; 117787244fe5SLaurent Pinchart 117887244fe5SLaurent Pinchart status = dma_cookie_status(chan, cookie, txstate); 117987244fe5SLaurent Pinchart if (status == DMA_COMPLETE || !txstate) 118087244fe5SLaurent Pinchart return status; 118187244fe5SLaurent Pinchart 118287244fe5SLaurent Pinchart spin_lock_irqsave(&rchan->lock, flags); 118387244fe5SLaurent Pinchart residue = rcar_dmac_chan_get_residue(rchan, cookie); 118487244fe5SLaurent Pinchart spin_unlock_irqrestore(&rchan->lock, flags); 118587244fe5SLaurent Pinchart 118687244fe5SLaurent Pinchart dma_set_residue(txstate, residue); 118787244fe5SLaurent Pinchart 118887244fe5SLaurent Pinchart return status; 118987244fe5SLaurent Pinchart } 119087244fe5SLaurent Pinchart 119187244fe5SLaurent Pinchart static void rcar_dmac_issue_pending(struct dma_chan *chan) 119287244fe5SLaurent Pinchart { 119387244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 119487244fe5SLaurent Pinchart unsigned long flags; 119587244fe5SLaurent Pinchart 119687244fe5SLaurent Pinchart spin_lock_irqsave(&rchan->lock, flags); 119787244fe5SLaurent Pinchart 119887244fe5SLaurent Pinchart if (list_empty(&rchan->desc.pending)) 119987244fe5SLaurent Pinchart goto done; 120087244fe5SLaurent Pinchart 120187244fe5SLaurent Pinchart /* Append the pending list to the active list. */ 120287244fe5SLaurent Pinchart list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active); 120387244fe5SLaurent Pinchart 120487244fe5SLaurent Pinchart /* 120587244fe5SLaurent Pinchart * If no transfer is running pick the first descriptor from the active 120687244fe5SLaurent Pinchart * list and start the transfer. 120787244fe5SLaurent Pinchart */ 120887244fe5SLaurent Pinchart if (!rchan->desc.running) { 120987244fe5SLaurent Pinchart struct rcar_dmac_desc *desc; 121087244fe5SLaurent Pinchart 121187244fe5SLaurent Pinchart desc = list_first_entry(&rchan->desc.active, 121287244fe5SLaurent Pinchart struct rcar_dmac_desc, node); 121387244fe5SLaurent Pinchart rchan->desc.running = desc; 121487244fe5SLaurent Pinchart 121587244fe5SLaurent Pinchart rcar_dmac_chan_start_xfer(rchan); 121687244fe5SLaurent Pinchart } 121787244fe5SLaurent Pinchart 121887244fe5SLaurent Pinchart done: 121987244fe5SLaurent Pinchart spin_unlock_irqrestore(&rchan->lock, flags); 122087244fe5SLaurent Pinchart } 122187244fe5SLaurent Pinchart 122287244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 122387244fe5SLaurent Pinchart * IRQ handling 122487244fe5SLaurent Pinchart */ 122587244fe5SLaurent Pinchart 1226ccadee9bSLaurent Pinchart static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan) 1227ccadee9bSLaurent Pinchart { 1228ccadee9bSLaurent Pinchart struct rcar_dmac_desc *desc = chan->desc.running; 1229ccadee9bSLaurent Pinchart unsigned int stage; 1230ccadee9bSLaurent Pinchart 1231ccadee9bSLaurent Pinchart if (WARN_ON(!desc || !desc->cyclic)) { 1232ccadee9bSLaurent Pinchart /* 1233ccadee9bSLaurent Pinchart * This should never happen, there should always be a running 1234ccadee9bSLaurent Pinchart * cyclic descriptor when a descriptor stage end interrupt is 1235ccadee9bSLaurent Pinchart * triggered. Warn and return. 1236ccadee9bSLaurent Pinchart */ 1237ccadee9bSLaurent Pinchart return IRQ_NONE; 1238ccadee9bSLaurent Pinchart } 1239ccadee9bSLaurent Pinchart 1240ccadee9bSLaurent Pinchart /* Program the interrupt pointer to the next stage. */ 1241ccadee9bSLaurent Pinchart stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & 1242ccadee9bSLaurent Pinchart RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; 1243ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage)); 1244ccadee9bSLaurent Pinchart 1245ccadee9bSLaurent Pinchart return IRQ_WAKE_THREAD; 1246ccadee9bSLaurent Pinchart } 1247ccadee9bSLaurent Pinchart 124887244fe5SLaurent Pinchart static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan) 124987244fe5SLaurent Pinchart { 125087244fe5SLaurent Pinchart struct rcar_dmac_desc *desc = chan->desc.running; 125187244fe5SLaurent Pinchart irqreturn_t ret = IRQ_WAKE_THREAD; 125287244fe5SLaurent Pinchart 125387244fe5SLaurent Pinchart if (WARN_ON_ONCE(!desc)) { 125487244fe5SLaurent Pinchart /* 1255ccadee9bSLaurent Pinchart * This should never happen, there should always be a running 1256ccadee9bSLaurent Pinchart * descriptor when a transfer end interrupt is triggered. Warn 1257ccadee9bSLaurent Pinchart * and return. 125887244fe5SLaurent Pinchart */ 125987244fe5SLaurent Pinchart return IRQ_NONE; 126087244fe5SLaurent Pinchart } 126187244fe5SLaurent Pinchart 126287244fe5SLaurent Pinchart /* 1263ccadee9bSLaurent Pinchart * The transfer end interrupt isn't generated for each chunk when using 1264ccadee9bSLaurent Pinchart * descriptor mode. Only update the running chunk pointer in 1265ccadee9bSLaurent Pinchart * non-descriptor mode. 126687244fe5SLaurent Pinchart */ 12671ed1315fSLaurent Pinchart if (!desc->hwdescs.use) { 1268ccadee9bSLaurent Pinchart /* 1269ccadee9bSLaurent Pinchart * If we haven't completed the last transfer chunk simply move 1270ccadee9bSLaurent Pinchart * to the next one. Only wake the IRQ thread if the transfer is 1271ccadee9bSLaurent Pinchart * cyclic. 1272ccadee9bSLaurent Pinchart */ 1273ccadee9bSLaurent Pinchart if (!list_is_last(&desc->running->node, &desc->chunks)) { 1274ccadee9bSLaurent Pinchart desc->running = list_next_entry(desc->running, node); 127587244fe5SLaurent Pinchart if (!desc->cyclic) 127687244fe5SLaurent Pinchart ret = IRQ_HANDLED; 127787244fe5SLaurent Pinchart goto done; 127887244fe5SLaurent Pinchart } 127987244fe5SLaurent Pinchart 128087244fe5SLaurent Pinchart /* 1281ccadee9bSLaurent Pinchart * We've completed the last transfer chunk. If the transfer is 1282ccadee9bSLaurent Pinchart * cyclic, move back to the first one. 128387244fe5SLaurent Pinchart */ 128487244fe5SLaurent Pinchart if (desc->cyclic) { 1285ccadee9bSLaurent Pinchart desc->running = 1286ccadee9bSLaurent Pinchart list_first_entry(&desc->chunks, 128787244fe5SLaurent Pinchart struct rcar_dmac_xfer_chunk, 128887244fe5SLaurent Pinchart node); 128987244fe5SLaurent Pinchart goto done; 129087244fe5SLaurent Pinchart } 1291ccadee9bSLaurent Pinchart } 129287244fe5SLaurent Pinchart 129387244fe5SLaurent Pinchart /* The descriptor is complete, move it to the done list. */ 129487244fe5SLaurent Pinchart list_move_tail(&desc->node, &chan->desc.done); 129587244fe5SLaurent Pinchart 129687244fe5SLaurent Pinchart /* Queue the next descriptor, if any. */ 129787244fe5SLaurent Pinchart if (!list_empty(&chan->desc.active)) 129887244fe5SLaurent Pinchart chan->desc.running = list_first_entry(&chan->desc.active, 129987244fe5SLaurent Pinchart struct rcar_dmac_desc, 130087244fe5SLaurent Pinchart node); 130187244fe5SLaurent Pinchart else 130287244fe5SLaurent Pinchart chan->desc.running = NULL; 130387244fe5SLaurent Pinchart 130487244fe5SLaurent Pinchart done: 130587244fe5SLaurent Pinchart if (chan->desc.running) 130687244fe5SLaurent Pinchart rcar_dmac_chan_start_xfer(chan); 130787244fe5SLaurent Pinchart 130887244fe5SLaurent Pinchart return ret; 130987244fe5SLaurent Pinchart } 131087244fe5SLaurent Pinchart 131187244fe5SLaurent Pinchart static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev) 131287244fe5SLaurent Pinchart { 1313ccadee9bSLaurent Pinchart u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE; 131487244fe5SLaurent Pinchart struct rcar_dmac_chan *chan = dev; 131587244fe5SLaurent Pinchart irqreturn_t ret = IRQ_NONE; 131687244fe5SLaurent Pinchart u32 chcr; 131787244fe5SLaurent Pinchart 131887244fe5SLaurent Pinchart spin_lock(&chan->lock); 131987244fe5SLaurent Pinchart 132087244fe5SLaurent Pinchart chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 1321ccadee9bSLaurent Pinchart if (chcr & RCAR_DMACHCR_TE) 1322ccadee9bSLaurent Pinchart mask |= RCAR_DMACHCR_DE; 1323ccadee9bSLaurent Pinchart rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask); 1324ccadee9bSLaurent Pinchart 1325ccadee9bSLaurent Pinchart if (chcr & RCAR_DMACHCR_DSE) 1326ccadee9bSLaurent Pinchart ret |= rcar_dmac_isr_desc_stage_end(chan); 132787244fe5SLaurent Pinchart 132887244fe5SLaurent Pinchart if (chcr & RCAR_DMACHCR_TE) 132987244fe5SLaurent Pinchart ret |= rcar_dmac_isr_transfer_end(chan); 133087244fe5SLaurent Pinchart 133187244fe5SLaurent Pinchart spin_unlock(&chan->lock); 133287244fe5SLaurent Pinchart 133387244fe5SLaurent Pinchart return ret; 133487244fe5SLaurent Pinchart } 133587244fe5SLaurent Pinchart 133687244fe5SLaurent Pinchart static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev) 133787244fe5SLaurent Pinchart { 133887244fe5SLaurent Pinchart struct rcar_dmac_chan *chan = dev; 133987244fe5SLaurent Pinchart struct rcar_dmac_desc *desc; 134087244fe5SLaurent Pinchart 134187244fe5SLaurent Pinchart spin_lock_irq(&chan->lock); 134287244fe5SLaurent Pinchart 134387244fe5SLaurent Pinchart /* For cyclic transfers notify the user after every chunk. */ 134487244fe5SLaurent Pinchart if (chan->desc.running && chan->desc.running->cyclic) { 134587244fe5SLaurent Pinchart dma_async_tx_callback callback; 134687244fe5SLaurent Pinchart void *callback_param; 134787244fe5SLaurent Pinchart 134887244fe5SLaurent Pinchart desc = chan->desc.running; 134987244fe5SLaurent Pinchart callback = desc->async_tx.callback; 135087244fe5SLaurent Pinchart callback_param = desc->async_tx.callback_param; 135187244fe5SLaurent Pinchart 135287244fe5SLaurent Pinchart if (callback) { 135387244fe5SLaurent Pinchart spin_unlock_irq(&chan->lock); 135487244fe5SLaurent Pinchart callback(callback_param); 135587244fe5SLaurent Pinchart spin_lock_irq(&chan->lock); 135687244fe5SLaurent Pinchart } 135787244fe5SLaurent Pinchart } 135887244fe5SLaurent Pinchart 135987244fe5SLaurent Pinchart /* 136087244fe5SLaurent Pinchart * Call the callback function for all descriptors on the done list and 136187244fe5SLaurent Pinchart * move them to the ack wait list. 136287244fe5SLaurent Pinchart */ 136387244fe5SLaurent Pinchart while (!list_empty(&chan->desc.done)) { 136487244fe5SLaurent Pinchart desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc, 136587244fe5SLaurent Pinchart node); 136687244fe5SLaurent Pinchart dma_cookie_complete(&desc->async_tx); 136787244fe5SLaurent Pinchart list_del(&desc->node); 136887244fe5SLaurent Pinchart 136987244fe5SLaurent Pinchart if (desc->async_tx.callback) { 137087244fe5SLaurent Pinchart spin_unlock_irq(&chan->lock); 137187244fe5SLaurent Pinchart /* 137287244fe5SLaurent Pinchart * We own the only reference to this descriptor, we can 137387244fe5SLaurent Pinchart * safely dereference it without holding the channel 137487244fe5SLaurent Pinchart * lock. 137587244fe5SLaurent Pinchart */ 137687244fe5SLaurent Pinchart desc->async_tx.callback(desc->async_tx.callback_param); 137787244fe5SLaurent Pinchart spin_lock_irq(&chan->lock); 137887244fe5SLaurent Pinchart } 137987244fe5SLaurent Pinchart 138087244fe5SLaurent Pinchart list_add_tail(&desc->node, &chan->desc.wait); 138187244fe5SLaurent Pinchart } 138287244fe5SLaurent Pinchart 1383ccadee9bSLaurent Pinchart spin_unlock_irq(&chan->lock); 1384ccadee9bSLaurent Pinchart 138587244fe5SLaurent Pinchart /* Recycle all acked descriptors. */ 138687244fe5SLaurent Pinchart rcar_dmac_desc_recycle_acked(chan); 138787244fe5SLaurent Pinchart 138887244fe5SLaurent Pinchart return IRQ_HANDLED; 138987244fe5SLaurent Pinchart } 139087244fe5SLaurent Pinchart 139187244fe5SLaurent Pinchart static irqreturn_t rcar_dmac_isr_error(int irq, void *data) 139287244fe5SLaurent Pinchart { 139387244fe5SLaurent Pinchart struct rcar_dmac *dmac = data; 139487244fe5SLaurent Pinchart 139587244fe5SLaurent Pinchart if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE)) 139687244fe5SLaurent Pinchart return IRQ_NONE; 139787244fe5SLaurent Pinchart 139887244fe5SLaurent Pinchart /* 139987244fe5SLaurent Pinchart * An unrecoverable error occurred on an unknown channel. Halt the DMAC, 140087244fe5SLaurent Pinchart * abort transfers on all channels, and reinitialize the DMAC. 140187244fe5SLaurent Pinchart */ 140287244fe5SLaurent Pinchart rcar_dmac_stop(dmac); 140387244fe5SLaurent Pinchart rcar_dmac_abort(dmac); 140487244fe5SLaurent Pinchart rcar_dmac_init(dmac); 140587244fe5SLaurent Pinchart 140687244fe5SLaurent Pinchart return IRQ_HANDLED; 140787244fe5SLaurent Pinchart } 140887244fe5SLaurent Pinchart 140987244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 141087244fe5SLaurent Pinchart * OF xlate and channel filter 141187244fe5SLaurent Pinchart */ 141287244fe5SLaurent Pinchart 141387244fe5SLaurent Pinchart static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg) 141487244fe5SLaurent Pinchart { 141587244fe5SLaurent Pinchart struct rcar_dmac *dmac = to_rcar_dmac(chan->device); 141687244fe5SLaurent Pinchart struct of_phandle_args *dma_spec = arg; 141787244fe5SLaurent Pinchart 141887244fe5SLaurent Pinchart /* 141987244fe5SLaurent Pinchart * FIXME: Using a filter on OF platforms is a nonsense. The OF xlate 142087244fe5SLaurent Pinchart * function knows from which device it wants to allocate a channel from, 142187244fe5SLaurent Pinchart * and would be perfectly capable of selecting the channel it wants. 142287244fe5SLaurent Pinchart * Forcing it to call dma_request_channel() and iterate through all 142387244fe5SLaurent Pinchart * channels from all controllers is just pointless. 142487244fe5SLaurent Pinchart */ 142587244fe5SLaurent Pinchart if (chan->device->device_config != rcar_dmac_device_config || 142687244fe5SLaurent Pinchart dma_spec->np != chan->device->dev->of_node) 142787244fe5SLaurent Pinchart return false; 142887244fe5SLaurent Pinchart 142987244fe5SLaurent Pinchart return !test_and_set_bit(dma_spec->args[0], dmac->modules); 143087244fe5SLaurent Pinchart } 143187244fe5SLaurent Pinchart 143287244fe5SLaurent Pinchart static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec, 143387244fe5SLaurent Pinchart struct of_dma *ofdma) 143487244fe5SLaurent Pinchart { 143587244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan; 143687244fe5SLaurent Pinchart struct dma_chan *chan; 143787244fe5SLaurent Pinchart dma_cap_mask_t mask; 143887244fe5SLaurent Pinchart 143987244fe5SLaurent Pinchart if (dma_spec->args_count != 1) 144087244fe5SLaurent Pinchart return NULL; 144187244fe5SLaurent Pinchart 144287244fe5SLaurent Pinchart /* Only slave DMA channels can be allocated via DT */ 144387244fe5SLaurent Pinchart dma_cap_zero(mask); 144487244fe5SLaurent Pinchart dma_cap_set(DMA_SLAVE, mask); 144587244fe5SLaurent Pinchart 144687244fe5SLaurent Pinchart chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec); 144787244fe5SLaurent Pinchart if (!chan) 144887244fe5SLaurent Pinchart return NULL; 144987244fe5SLaurent Pinchart 145087244fe5SLaurent Pinchart rchan = to_rcar_dmac_chan(chan); 145187244fe5SLaurent Pinchart rchan->mid_rid = dma_spec->args[0]; 145287244fe5SLaurent Pinchart 145387244fe5SLaurent Pinchart return chan; 145487244fe5SLaurent Pinchart } 145587244fe5SLaurent Pinchart 145687244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 145787244fe5SLaurent Pinchart * Power management 145887244fe5SLaurent Pinchart */ 145987244fe5SLaurent Pinchart 146087244fe5SLaurent Pinchart #ifdef CONFIG_PM_SLEEP 146187244fe5SLaurent Pinchart static int rcar_dmac_sleep_suspend(struct device *dev) 146287244fe5SLaurent Pinchart { 146387244fe5SLaurent Pinchart /* 146487244fe5SLaurent Pinchart * TODO: Wait for the current transfer to complete and stop the device. 146587244fe5SLaurent Pinchart */ 146687244fe5SLaurent Pinchart return 0; 146787244fe5SLaurent Pinchart } 146887244fe5SLaurent Pinchart 146987244fe5SLaurent Pinchart static int rcar_dmac_sleep_resume(struct device *dev) 147087244fe5SLaurent Pinchart { 147187244fe5SLaurent Pinchart /* TODO: Resume transfers, if any. */ 147287244fe5SLaurent Pinchart return 0; 147387244fe5SLaurent Pinchart } 147487244fe5SLaurent Pinchart #endif 147587244fe5SLaurent Pinchart 147687244fe5SLaurent Pinchart #ifdef CONFIG_PM 147787244fe5SLaurent Pinchart static int rcar_dmac_runtime_suspend(struct device *dev) 147887244fe5SLaurent Pinchart { 147987244fe5SLaurent Pinchart return 0; 148087244fe5SLaurent Pinchart } 148187244fe5SLaurent Pinchart 148287244fe5SLaurent Pinchart static int rcar_dmac_runtime_resume(struct device *dev) 148387244fe5SLaurent Pinchart { 148487244fe5SLaurent Pinchart struct rcar_dmac *dmac = dev_get_drvdata(dev); 148587244fe5SLaurent Pinchart 148687244fe5SLaurent Pinchart return rcar_dmac_init(dmac); 148787244fe5SLaurent Pinchart } 148887244fe5SLaurent Pinchart #endif 148987244fe5SLaurent Pinchart 149087244fe5SLaurent Pinchart static const struct dev_pm_ops rcar_dmac_pm = { 149187244fe5SLaurent Pinchart SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume) 149287244fe5SLaurent Pinchart SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume, 149387244fe5SLaurent Pinchart NULL) 149487244fe5SLaurent Pinchart }; 149587244fe5SLaurent Pinchart 149687244fe5SLaurent Pinchart /* ----------------------------------------------------------------------------- 149787244fe5SLaurent Pinchart * Probe and remove 149887244fe5SLaurent Pinchart */ 149987244fe5SLaurent Pinchart 150087244fe5SLaurent Pinchart static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, 150187244fe5SLaurent Pinchart struct rcar_dmac_chan *rchan, 150287244fe5SLaurent Pinchart unsigned int index) 150387244fe5SLaurent Pinchart { 150487244fe5SLaurent Pinchart struct platform_device *pdev = to_platform_device(dmac->dev); 150587244fe5SLaurent Pinchart struct dma_chan *chan = &rchan->chan; 150687244fe5SLaurent Pinchart char pdev_irqname[5]; 150787244fe5SLaurent Pinchart char *irqname; 150887244fe5SLaurent Pinchart int irq; 150987244fe5SLaurent Pinchart int ret; 151087244fe5SLaurent Pinchart 151187244fe5SLaurent Pinchart rchan->index = index; 151287244fe5SLaurent Pinchart rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index); 151387244fe5SLaurent Pinchart rchan->mid_rid = -EINVAL; 151487244fe5SLaurent Pinchart 151587244fe5SLaurent Pinchart spin_lock_init(&rchan->lock); 151687244fe5SLaurent Pinchart 1517f7638c90SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.free); 1518f7638c90SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.pending); 1519f7638c90SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.active); 1520f7638c90SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.done); 1521f7638c90SLaurent Pinchart INIT_LIST_HEAD(&rchan->desc.wait); 1522f7638c90SLaurent Pinchart 152387244fe5SLaurent Pinchart /* Request the channel interrupt. */ 152487244fe5SLaurent Pinchart sprintf(pdev_irqname, "ch%u", index); 152587244fe5SLaurent Pinchart irq = platform_get_irq_byname(pdev, pdev_irqname); 152687244fe5SLaurent Pinchart if (irq < 0) { 152787244fe5SLaurent Pinchart dev_err(dmac->dev, "no IRQ specified for channel %u\n", index); 152887244fe5SLaurent Pinchart return -ENODEV; 152987244fe5SLaurent Pinchart } 153087244fe5SLaurent Pinchart 153187244fe5SLaurent Pinchart irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", 153287244fe5SLaurent Pinchart dev_name(dmac->dev), index); 153387244fe5SLaurent Pinchart if (!irqname) 153487244fe5SLaurent Pinchart return -ENOMEM; 153587244fe5SLaurent Pinchart 153687244fe5SLaurent Pinchart ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel, 153787244fe5SLaurent Pinchart rcar_dmac_isr_channel_thread, 0, 153887244fe5SLaurent Pinchart irqname, rchan); 153987244fe5SLaurent Pinchart if (ret) { 154087244fe5SLaurent Pinchart dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret); 154187244fe5SLaurent Pinchart return ret; 154287244fe5SLaurent Pinchart } 154387244fe5SLaurent Pinchart 154487244fe5SLaurent Pinchart /* 154587244fe5SLaurent Pinchart * Initialize the DMA engine channel and add it to the DMA engine 154687244fe5SLaurent Pinchart * channels list. 154787244fe5SLaurent Pinchart */ 154887244fe5SLaurent Pinchart chan->device = &dmac->engine; 154987244fe5SLaurent Pinchart dma_cookie_init(chan); 155087244fe5SLaurent Pinchart 155187244fe5SLaurent Pinchart list_add_tail(&chan->device_node, &dmac->engine.channels); 155287244fe5SLaurent Pinchart 155387244fe5SLaurent Pinchart return 0; 155487244fe5SLaurent Pinchart } 155587244fe5SLaurent Pinchart 155687244fe5SLaurent Pinchart static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) 155787244fe5SLaurent Pinchart { 155887244fe5SLaurent Pinchart struct device_node *np = dev->of_node; 155987244fe5SLaurent Pinchart int ret; 156087244fe5SLaurent Pinchart 156187244fe5SLaurent Pinchart ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); 156287244fe5SLaurent Pinchart if (ret < 0) { 156387244fe5SLaurent Pinchart dev_err(dev, "unable to read dma-channels property\n"); 156487244fe5SLaurent Pinchart return ret; 156587244fe5SLaurent Pinchart } 156687244fe5SLaurent Pinchart 156787244fe5SLaurent Pinchart if (dmac->n_channels <= 0 || dmac->n_channels >= 100) { 156887244fe5SLaurent Pinchart dev_err(dev, "invalid number of channels %u\n", 156987244fe5SLaurent Pinchart dmac->n_channels); 157087244fe5SLaurent Pinchart return -EINVAL; 157187244fe5SLaurent Pinchart } 157287244fe5SLaurent Pinchart 157387244fe5SLaurent Pinchart return 0; 157487244fe5SLaurent Pinchart } 157587244fe5SLaurent Pinchart 157687244fe5SLaurent Pinchart static int rcar_dmac_probe(struct platform_device *pdev) 157787244fe5SLaurent Pinchart { 157887244fe5SLaurent Pinchart const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | 157987244fe5SLaurent Pinchart DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | 158087244fe5SLaurent Pinchart DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | 158187244fe5SLaurent Pinchart DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; 158287244fe5SLaurent Pinchart struct dma_device *engine; 158387244fe5SLaurent Pinchart struct rcar_dmac *dmac; 158487244fe5SLaurent Pinchart struct resource *mem; 158587244fe5SLaurent Pinchart unsigned int i; 158687244fe5SLaurent Pinchart char *irqname; 158787244fe5SLaurent Pinchart int irq; 158887244fe5SLaurent Pinchart int ret; 158987244fe5SLaurent Pinchart 159087244fe5SLaurent Pinchart dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); 159187244fe5SLaurent Pinchart if (!dmac) 159287244fe5SLaurent Pinchart return -ENOMEM; 159387244fe5SLaurent Pinchart 159487244fe5SLaurent Pinchart dmac->dev = &pdev->dev; 159587244fe5SLaurent Pinchart platform_set_drvdata(pdev, dmac); 159687244fe5SLaurent Pinchart 159787244fe5SLaurent Pinchart ret = rcar_dmac_parse_of(&pdev->dev, dmac); 159887244fe5SLaurent Pinchart if (ret < 0) 159987244fe5SLaurent Pinchart return ret; 160087244fe5SLaurent Pinchart 160187244fe5SLaurent Pinchart dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, 160287244fe5SLaurent Pinchart sizeof(*dmac->channels), GFP_KERNEL); 160387244fe5SLaurent Pinchart if (!dmac->channels) 160487244fe5SLaurent Pinchart return -ENOMEM; 160587244fe5SLaurent Pinchart 160687244fe5SLaurent Pinchart /* Request resources. */ 160787244fe5SLaurent Pinchart mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 160887244fe5SLaurent Pinchart dmac->iomem = devm_ioremap_resource(&pdev->dev, mem); 160987244fe5SLaurent Pinchart if (IS_ERR(dmac->iomem)) 161087244fe5SLaurent Pinchart return PTR_ERR(dmac->iomem); 161187244fe5SLaurent Pinchart 161287244fe5SLaurent Pinchart irq = platform_get_irq_byname(pdev, "error"); 161387244fe5SLaurent Pinchart if (irq < 0) { 161487244fe5SLaurent Pinchart dev_err(&pdev->dev, "no error IRQ specified\n"); 161587244fe5SLaurent Pinchart return -ENODEV; 161687244fe5SLaurent Pinchart } 161787244fe5SLaurent Pinchart 161887244fe5SLaurent Pinchart irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error", 161987244fe5SLaurent Pinchart dev_name(dmac->dev)); 162087244fe5SLaurent Pinchart if (!irqname) 162187244fe5SLaurent Pinchart return -ENOMEM; 162287244fe5SLaurent Pinchart 162387244fe5SLaurent Pinchart ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0, 162487244fe5SLaurent Pinchart irqname, dmac); 162587244fe5SLaurent Pinchart if (ret) { 162687244fe5SLaurent Pinchart dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n", 162787244fe5SLaurent Pinchart irq, ret); 162887244fe5SLaurent Pinchart return ret; 162987244fe5SLaurent Pinchart } 163087244fe5SLaurent Pinchart 163187244fe5SLaurent Pinchart /* Enable runtime PM and initialize the device. */ 163287244fe5SLaurent Pinchart pm_runtime_enable(&pdev->dev); 163387244fe5SLaurent Pinchart ret = pm_runtime_get_sync(&pdev->dev); 163487244fe5SLaurent Pinchart if (ret < 0) { 163587244fe5SLaurent Pinchart dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); 163687244fe5SLaurent Pinchart return ret; 163787244fe5SLaurent Pinchart } 163887244fe5SLaurent Pinchart 163987244fe5SLaurent Pinchart ret = rcar_dmac_init(dmac); 164087244fe5SLaurent Pinchart pm_runtime_put(&pdev->dev); 164187244fe5SLaurent Pinchart 164287244fe5SLaurent Pinchart if (ret) { 164387244fe5SLaurent Pinchart dev_err(&pdev->dev, "failed to reset device\n"); 164487244fe5SLaurent Pinchart goto error; 164587244fe5SLaurent Pinchart } 164687244fe5SLaurent Pinchart 164787244fe5SLaurent Pinchart /* Initialize the channels. */ 164887244fe5SLaurent Pinchart INIT_LIST_HEAD(&dmac->engine.channels); 164987244fe5SLaurent Pinchart 165087244fe5SLaurent Pinchart for (i = 0; i < dmac->n_channels; ++i) { 165187244fe5SLaurent Pinchart ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i); 165287244fe5SLaurent Pinchart if (ret < 0) 165387244fe5SLaurent Pinchart goto error; 165487244fe5SLaurent Pinchart } 165587244fe5SLaurent Pinchart 165687244fe5SLaurent Pinchart /* Register the DMAC as a DMA provider for DT. */ 165787244fe5SLaurent Pinchart ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, 165887244fe5SLaurent Pinchart NULL); 165987244fe5SLaurent Pinchart if (ret < 0) 166087244fe5SLaurent Pinchart goto error; 166187244fe5SLaurent Pinchart 166287244fe5SLaurent Pinchart /* 166387244fe5SLaurent Pinchart * Register the DMA engine device. 166487244fe5SLaurent Pinchart * 166587244fe5SLaurent Pinchart * Default transfer size of 32 bytes requires 32-byte alignment. 166687244fe5SLaurent Pinchart */ 166787244fe5SLaurent Pinchart engine = &dmac->engine; 166887244fe5SLaurent Pinchart dma_cap_set(DMA_MEMCPY, engine->cap_mask); 166987244fe5SLaurent Pinchart dma_cap_set(DMA_SLAVE, engine->cap_mask); 167087244fe5SLaurent Pinchart 167187244fe5SLaurent Pinchart engine->dev = &pdev->dev; 167287244fe5SLaurent Pinchart engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); 167387244fe5SLaurent Pinchart 167487244fe5SLaurent Pinchart engine->src_addr_widths = widths; 167587244fe5SLaurent Pinchart engine->dst_addr_widths = widths; 167687244fe5SLaurent Pinchart engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); 167787244fe5SLaurent Pinchart engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 167887244fe5SLaurent Pinchart 167987244fe5SLaurent Pinchart engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; 168087244fe5SLaurent Pinchart engine->device_free_chan_resources = rcar_dmac_free_chan_resources; 168187244fe5SLaurent Pinchart engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; 168287244fe5SLaurent Pinchart engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; 168387244fe5SLaurent Pinchart engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; 168487244fe5SLaurent Pinchart engine->device_config = rcar_dmac_device_config; 168587244fe5SLaurent Pinchart engine->device_terminate_all = rcar_dmac_chan_terminate_all; 168687244fe5SLaurent Pinchart engine->device_tx_status = rcar_dmac_tx_status; 168787244fe5SLaurent Pinchart engine->device_issue_pending = rcar_dmac_issue_pending; 168887244fe5SLaurent Pinchart 168987244fe5SLaurent Pinchart ret = dma_async_device_register(engine); 169087244fe5SLaurent Pinchart if (ret < 0) 169187244fe5SLaurent Pinchart goto error; 169287244fe5SLaurent Pinchart 169387244fe5SLaurent Pinchart return 0; 169487244fe5SLaurent Pinchart 169587244fe5SLaurent Pinchart error: 169687244fe5SLaurent Pinchart of_dma_controller_free(pdev->dev.of_node); 169787244fe5SLaurent Pinchart pm_runtime_disable(&pdev->dev); 169887244fe5SLaurent Pinchart return ret; 169987244fe5SLaurent Pinchart } 170087244fe5SLaurent Pinchart 170187244fe5SLaurent Pinchart static int rcar_dmac_remove(struct platform_device *pdev) 170287244fe5SLaurent Pinchart { 170387244fe5SLaurent Pinchart struct rcar_dmac *dmac = platform_get_drvdata(pdev); 170487244fe5SLaurent Pinchart 170587244fe5SLaurent Pinchart of_dma_controller_free(pdev->dev.of_node); 170687244fe5SLaurent Pinchart dma_async_device_unregister(&dmac->engine); 170787244fe5SLaurent Pinchart 170887244fe5SLaurent Pinchart pm_runtime_disable(&pdev->dev); 170987244fe5SLaurent Pinchart 171087244fe5SLaurent Pinchart return 0; 171187244fe5SLaurent Pinchart } 171287244fe5SLaurent Pinchart 171387244fe5SLaurent Pinchart static void rcar_dmac_shutdown(struct platform_device *pdev) 171487244fe5SLaurent Pinchart { 171587244fe5SLaurent Pinchart struct rcar_dmac *dmac = platform_get_drvdata(pdev); 171687244fe5SLaurent Pinchart 171787244fe5SLaurent Pinchart rcar_dmac_stop(dmac); 171887244fe5SLaurent Pinchart } 171987244fe5SLaurent Pinchart 172087244fe5SLaurent Pinchart static const struct of_device_id rcar_dmac_of_ids[] = { 172187244fe5SLaurent Pinchart { .compatible = "renesas,rcar-dmac", }, 172287244fe5SLaurent Pinchart { /* Sentinel */ } 172387244fe5SLaurent Pinchart }; 172487244fe5SLaurent Pinchart MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids); 172587244fe5SLaurent Pinchart 172687244fe5SLaurent Pinchart static struct platform_driver rcar_dmac_driver = { 172787244fe5SLaurent Pinchart .driver = { 172887244fe5SLaurent Pinchart .pm = &rcar_dmac_pm, 172987244fe5SLaurent Pinchart .name = "rcar-dmac", 173087244fe5SLaurent Pinchart .of_match_table = rcar_dmac_of_ids, 173187244fe5SLaurent Pinchart }, 173287244fe5SLaurent Pinchart .probe = rcar_dmac_probe, 173387244fe5SLaurent Pinchart .remove = rcar_dmac_remove, 173487244fe5SLaurent Pinchart .shutdown = rcar_dmac_shutdown, 173587244fe5SLaurent Pinchart }; 173687244fe5SLaurent Pinchart 173787244fe5SLaurent Pinchart module_platform_driver(rcar_dmac_driver); 173887244fe5SLaurent Pinchart 173987244fe5SLaurent Pinchart MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver"); 174087244fe5SLaurent Pinchart MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); 174187244fe5SLaurent Pinchart MODULE_LICENSE("GPL v2"); 1742