// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-sdma.c
//
// This file contains a driver for the Freescale Smart DMA engine
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
//
// Based on code from Freescale:
//
// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.

#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

#include "dmaengine.h"

/* SDMA registers */
#define SDMA_H_C0PTR		0x000
#define SDMA_H_INTR		0x004
#define SDMA_H_STATSTOP		0x008
#define SDMA_H_START		0x00c
#define SDMA_H_EVTOVR		0x010
#define SDMA_H_DSPOVR		0x014
#define SDMA_H_HOSTOVR		0x018
#define SDMA_H_EVTPEND		0x01c
#define SDMA_H_DSPENBL		0x020
#define SDMA_H_RESET		0x024
#define SDMA_H_EVTERR		0x028
#define SDMA_H_INTRMSK		0x02c
#define SDMA_H_PSW		0x030
#define SDMA_H_EVTERRDBG	0x034
#define SDMA_H_CONFIG		0x038
#define SDMA_ONCE_ENB		0x040
#define SDMA_ONCE_DATA		0x044
#define SDMA_ONCE_INSTR		0x048
#define SDMA_ONCE_STAT		0x04c
#define SDMA_ONCE_CMD		0x050
#define SDMA_EVT_MIRROR		0x054
#define SDMA_ILLINSTADDR	0x058
#define SDMA_CHN0ADDR		0x05c
#define SDMA_ONCE_RTB		0x060
#define SDMA_XTRIG_CONF1	0x070
#define SDMA_XTRIG_CONF2	0x074
#define SDMA_CHNENBL0_IMX35	0x200
#define SDMA_CHNENBL0_IMX31	0x080
#define SDMA_CHNPRI_0		0x100

/*
 * Buffer descriptor status values.
 */
#define BD_DONE  0x01
#define BD_WRAP  0x02
#define BD_CONT  0x04
#define BD_INTR  0x08
#define BD_RROR  0x10
#define BD_LAST  0x20
#define BD_EXTD  0x80

/*
 * Data Node descriptor status values.
 */
#define DND_END_OF_FRAME  0x80
#define DND_END_OF_XFER	0x40
#define DND_DONE	0x20
#define DND_UNUSED	0x01

/*
 * IPCV2 descriptor status values.
 */
#define BD_IPCV2_END_OF_FRAME  0x40

#define IPCV2_MAX_NODES        50
/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error
 */
#define DATA_ERROR  0x10000000

/*
 * Buffer descriptor commands.
 */
#define C0_ADDR             0x01
#define C0_LOAD             0x02
#define C0_DUMP             0x03
#define C0_SETCTX           0x07
#define C0_GETCTX           0x03
#define C0_SETDM            0x01
#define C0_SETPM            0x04
#define C0_GETDM            0x02
#define C0_GETPM            0x08
/*
 * Change endianness indicator in the BD command field
 */
#define CHANGE_ENDIANNESS   0x80

/*
 *  p_2_p watermark_level description
 *	Bits		Name			Description
 *	0-7		Lower WML		Lower watermark level
 *	8		PS			1: Pad Swallowing
 *						0: No Pad Swallowing
 *	9		PA			1: Pad Adding
 *						0: No Pad Adding
 *	10		SPDIF			If this bit is set both source
 *						and destination are on SPBA
 *	11		Source Bit(SP)		1: Source on SPBA
 *						0: Source on AIPS
 *	12		Destination Bit(DP)	1: Destination on SPBA
 *						0: Destination on AIPS
 *	13-15		---------		MUST BE 0
 *	16-23		Higher WML		HWML
 *	24-27		N			Total number of samples after
 *						which Pad adding/Swallowing
 *						must be done. It must be odd.
 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 *						LWML event mask
 *						0: LWE in EVENTS register
 *						1: LWE in EVENTS2 register
 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 *						HWML event mask
 *						0: HWE in EVENTS register
 *						1: HWE in EVENTS2 register
 *	30		---------		MUST BE 0
 *	31		CONT			1: Amount of samples to be
 *						transferred is unknown and
 *						script will keep on
 *						transferring samples as long as
 *						both events are detected and
 *						script must be manually stopped
 *						by the application
 *						0: The amount of samples to be
 *						transferred is equal to the
 *						count field of mode word
 */
#define SDMA_WATERMARK_LEVEL_LWML	0xFF
#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
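
/*
 * Illustrative sketch (not taken from the reference manual): a
 * peripheral-to-peripheral channel with a lower watermark of 4, a
 * higher watermark of 8 and both peripherals on the SPBA bus would end
 * up with a watermark_level composed roughly like this:
 *
 *	wml  = 4;				<- LWML, bits 0-7
 *	wml |= 8 << 16;				<- HWML, bits 16-23
 *	wml |= SDMA_WATERMARK_LEVEL_SP;		<- source on SPBA
 *	wml |= SDMA_WATERMARK_LEVEL_DP;		<- destination on SPBA
 *	wml |= SDMA_WATERMARK_LEVEL_CONT;
 *
 * sdma_set_watermarklevel_for_p2p() below derives these bits from the
 * slave configuration and the SPBA address range.
 */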

#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
				 BIT(DMA_MEM_TO_DEV) | \
				 BIT(DMA_DEV_TO_DEV))

/*
 * Mode/Count of data node descriptors - IPCv2
 */
struct sdma_mode_count {
	u32 count   : 16; /* size of the buffer pointed by this BD */
	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
	u32 command :  8; /* command mostly used for channel 0 */
};

/*
 * Buffer descriptor
 */
struct sdma_buffer_descriptor {
	struct sdma_mode_count  mode;
	u32 buffer_addr;	/* address of the buffer described */
	u32 ext_buffer_addr;	/* extended buffer address */
} __attribute__ ((packed));

/**
 * struct sdma_channel_control - Channel control Block
 *
 * @current_bd_ptr	current buffer descriptor processed
 * @base_bd_ptr		first element of buffer descriptor array
 * @unused		padding. The SDMA engine expects an array of 128 byte
 *			control blocks
 */
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));

/**
 * struct sdma_state_registers - SDMA context for a channel
 *
 * @pc:		program counter
 * @t:		test bit: status of arithmetic & test instruction
 * @rpc:	return program counter
 * @sf:		source fault while loading data
 * @spc:	loop start program counter
 * @df:		destination fault while storing data
 * @epc:	loop end program counter
 * @lm:		loop mode
 */
struct sdma_state_registers {
	u32 pc     :14;
	u32 unused1: 1;
	u32 t      : 1;
	u32 rpc    :14;
	u32 unused0: 1;
	u32 sf     : 1;
	u32 spc    :14;
	u32 unused2: 1;
	u32 df     : 1;
	u32 epc    :14;
	u32 lm     : 2;
} __attribute__ ((packed));

/**
 * struct sdma_context_data - sdma context specific to a channel
 *
 * @channel_state:	channel state bits
 * @gReg:		general registers
 * @mda:		burst dma destination address register
 * @msa:		burst dma source address register
 * @ms:			burst dma status register
 * @md:			burst dma data register
 * @pda:		peripheral dma destination address register
 * @psa:		peripheral dma source address register
 * @ps:			peripheral dma status register
 * @pd:			peripheral dma data register
 * @ca:			CRC polynomial register
 * @cs:			CRC accumulator register
 * @dda:		dedicated core destination address register
 * @dsa:		dedicated core source address register
 * @ds:			dedicated core status register
 * @dd:			dedicated core data register
 */
struct sdma_context_data {
	struct sdma_state_registers  channel_state;
	u32  gReg[8];
	u32  mda;
	u32  msa;
	u32  ms;
	u32  md;
	u32  pda;
	u32  psa;
	u32  ps;
	u32  pd;
	u32  ca;
	u32  cs;
	u32  dda;
	u32  dsa;
	u32  ds;
	u32  dd;
	u32  scratch0;
	u32  scratch1;
	u32  scratch2;
	u32  scratch3;
	u32  scratch4;
	u32  scratch5;
	u32  scratch6;
	u32  scratch7;
} __attribute__ ((packed));

#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
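
/*
 * sizeof(struct sdma_buffer_descriptor) is 12 bytes (4 bytes of
 * mode/count plus two 32-bit addresses), so with the usual 4 KiB
 * PAGE_SIZE the page allocated per channel holds NUM_BD = 341
 * descriptors; on configurations with larger pages this scales
 * accordingly.
 */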

struct sdma_engine;

/**
 * struct sdma_desc - descriptor structure for one transfer
 * @num_bd		number of descriptors currently handled, at most NUM_BD
 * @bd_phys		physical (DMA) address of the buffer descriptors
 * @buf_tail		ID of the buffer that was processed
 * @buf_ptail		ID of the previous buffer that was processed
 * @period_len		period length, used in cyclic.
 * @chn_real_count	the real count updated from bd->mode.count
 * @chn_count		the transfer count set up
 * @sdmac		sdma_channel pointer
 * @bd			pointer to the allocated buffer descriptors
 */
struct sdma_desc {
	unsigned int		num_bd;
	dma_addr_t		bd_phys;
	unsigned int		buf_tail;
	unsigned int		buf_ptail;
	unsigned int		period_len;
	unsigned int		chn_real_count;
	unsigned int		chn_count;
	struct sdma_channel	*sdmac;
	struct sdma_buffer_descriptor *bd;
};

/**
 * struct sdma_channel - housekeeping for a SDMA channel
 *
 * @sdma		pointer to the SDMA engine for this channel
 * @channel		the channel number, matches dmaengine chan_id + 1
 * @direction		transfer type. Needed for setting SDMA script
 * @peripheral_type	Peripheral type. Needed for setting SDMA script
 * @event_id0		aka dma request line
 * @event_id1		for channels that use 2 events
 * @word_size		peripheral access size
 */
struct sdma_channel {
	struct sdma_desc		*desc;
	struct sdma_desc		_desc;
	struct sdma_engine		*sdma;
	unsigned int			channel;
	enum dma_transfer_direction	direction;
	enum sdma_peripheral_type	peripheral_type;
	unsigned int			event_id0;
	unsigned int			event_id1;
	enum dma_slave_buswidth		word_size;
	unsigned int			pc_from_device, pc_to_device;
	unsigned int			device_to_device;
	unsigned long			flags;
	dma_addr_t			per_address, per_address2;
	unsigned long			event_mask[2];
	unsigned long			watermark_level;
	u32				shp_addr, per_addr;
	struct dma_chan			chan;
	spinlock_t			lock;
	struct dma_async_tx_descriptor	txdesc;
	enum dma_status			status;
	struct tasklet_struct		tasklet;
	struct imx_dma_data		data;
	bool				enabled;
};

#define IMX_DMA_SG_LOOP		BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

#define SDMA_FIRMWARE_MAGIC 0x414d4453
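/* 0x414d4453 is the little-endian encoding of the ASCII string "SDMA" */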

/**
 * struct sdma_firmware_header - Layout of the firmware image
 *
 * @magic		"SDMA"
 * @version_major	increased whenever layout of struct
 *			sdma_script_start_addrs changes.
 * @version_minor	firmware minor version (for binary compatible changes)
 * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs	Number of script addresses in this image
 * @ram_code_start	offset of SDMA ram image in this firmware image
 * @ram_code_size	size of SDMA ram image
 * @script_addrs	Stores the start address of the SDMA scripts
 *			(in SDMA memory space)
 */
struct sdma_firmware_header {
	u32 magic;
	u32 version_major;
	u32 version_minor;
	u32 script_addrs_start;
	u32 num_script_addrs;
	u32 ram_code_start;
	u32 ram_code_size;
};
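
/*
 * Rough firmware image layout implied by the header above (offsets in
 * bytes from the start of the image):
 *
 *	0x00			struct sdma_firmware_header
 *	script_addrs_start	num_script_addrs 32-bit script entry points
 *	ram_code_start		ram_code_size bytes of SDMA RAM code
 */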

struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs	*script_addrs;
};

struct sdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct sdma_channel		channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control	*channel_control;
	void __iomem			*regs;
	struct sdma_context_data	*context;
	dma_addr_t			context_phys;
	struct dma_device		dma_device;
	struct clk			*clk_ipg;
	struct clk			*clk_ahb;
	spinlock_t			channel_0_lock;
	u32				script_number;
	struct sdma_script_start_addrs	*script_addrs;
	const struct sdma_driver_data	*drvdata;
	u32				spba_start_addr;
	u32				spba_end_addr;
	unsigned int			irq;
	dma_addr_t			bd0_phys;
	struct sdma_buffer_descriptor	*bd0;
};

static struct sdma_driver_data sdma_imx31 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX31,
	.num_events = 32,
};

static struct sdma_script_start_addrs sdma_script_imx25 = {
	.ap_2_ap_addr = 729,
	.uart_2_mcu_addr = 904,
	.per_2_app_addr = 1255,
	.mcu_2_app_addr = 834,
	.uartsh_2_mcu_addr = 1120,
	.per_2_shp_addr = 1329,
	.mcu_2_shp_addr = 1048,
	.ata_2_mcu_addr = 1560,
	.mcu_2_ata_addr = 1479,
	.app_2_per_addr = 1189,
	.app_2_mcu_addr = 770,
	.shp_2_per_addr = 1407,
	.shp_2_mcu_addr = 979,
};

static struct sdma_driver_data sdma_imx25 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx25,
};

static struct sdma_driver_data sdma_imx35 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
};

static struct sdma_script_start_addrs sdma_script_imx51 = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_driver_data sdma_imx51 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx51,
};

static struct sdma_script_start_addrs sdma_script_imx53 = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_driver_data sdma_imx53 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx53,
};

static struct sdma_script_start_addrs sdma_script_imx6q = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.per_2_per_addr = 6331,
	.uartsh_2_mcu_addr = 1032,
	.mcu_2_shp_addr = 960,
	.app_2_mcu_addr = 683,
	.shp_2_mcu_addr = 891,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
};

static struct sdma_driver_data sdma_imx6q = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx6q,
};

static struct sdma_script_start_addrs sdma_script_imx7d = {
	.ap_2_ap_addr = 644,
	.uart_2_mcu_addr = 819,
	.mcu_2_app_addr = 749,
	.uartsh_2_mcu_addr = 1034,
	.mcu_2_shp_addr = 962,
	.app_2_mcu_addr = 685,
	.shp_2_mcu_addr = 893,
	.spdif_2_mcu_addr = 1102,
	.mcu_2_spdif_addr = 1136,
};

static struct sdma_driver_data sdma_imx7d = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
};

static const struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx25-sdma",
		.driver_data = (unsigned long)&sdma_imx25,
	}, {
		.name = "imx31-sdma",
		.driver_data = (unsigned long)&sdma_imx31,
	}, {
		.name = "imx35-sdma",
		.driver_data = (unsigned long)&sdma_imx35,
	}, {
		.name = "imx51-sdma",
		.driver_data = (unsigned long)&sdma_imx51,
	}, {
		.name = "imx53-sdma",
		.driver_data = (unsigned long)&sdma_imx53,
	}, {
		.name = "imx6q-sdma",
		.driver_data = (unsigned long)&sdma_imx6q,
	}, {
		.name = "imx7d-sdma",
		.driver_data = (unsigned long)&sdma_imx7d,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);

static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);

#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM	(3)     /* indicates which context switch mode is selected*/

static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
	u32 chnenbl0 = sdma->drvdata->chnenbl0;
	return chnenbl0 + event * 4;
}
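
/*
 * Example: on an i.MX35-class layout (chnenbl0 = 0x200) the enable
 * register for SDMA event 3 lives at 0x200 + 3 * 4 = 0x20c, each
 * register carrying one enable bit per channel.
 */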

static int sdma_config_ownership(struct sdma_channel *sdmac,
		bool event_override, bool mcu_override, bool dsp_override)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long evt, mcu, dsp;

	if (event_override && mcu_override && dsp_override)
		return -EINVAL;

	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);

	if (dsp_override)
		__clear_bit(channel, &dsp);
	else
		__set_bit(channel, &dsp);

	if (event_override)
		__clear_bit(channel, &evt);
	else
		__set_bit(channel, &evt);

	if (mcu_override)
		__clear_bit(channel, &mcu);
	else
		__set_bit(channel, &mcu);

	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);

	return 0;
}

static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
	unsigned long flags;
	struct sdma_channel *sdmac = &sdma->channel[channel];

	writel(BIT(channel), sdma->regs + SDMA_H_START);

	spin_lock_irqsave(&sdmac->lock, flags);
	sdmac->enabled = true;
	spin_unlock_irqrestore(&sdmac->lock, flags);
}

/*
 * sdma_run_channel0 - run a channel and wait until it's done
 */
static int sdma_run_channel0(struct sdma_engine *sdma)
{
	int ret;
	u32 reg;

	sdma_enable_channel(sdma, 0);

	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
						reg, !(reg & 1), 1, 500);
	if (ret)
		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");

	/* Set bits of CONFIG register with dynamic context switching */
	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	return ret;
}
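
/*
 * Channel 0 is the command channel: scripts and channel contexts are
 * loaded by pointing bd0 at the payload and starting channel 0, then
 * polling its bit in SDMA_H_STATSTOP until the engine clears it (see
 * the readl_relaxed_poll_timeout_atomic() above). Its interrupt is
 * masked out in sdma_int_handler() rather than handled asynchronously.
 */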

static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
		u32 address)
{
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	void *buf_virt;
	dma_addr_t buf_phys;
	int ret;
	unsigned long flags;

	buf_virt = dma_alloc_coherent(NULL,
			size,
			&buf_phys, GFP_KERNEL);
	if (!buf_virt) {
		return -ENOMEM;
	}

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = size / 2;
	bd0->buffer_addr = buf_phys;
	bd0->ext_buffer_addr = address;

	memcpy(buf_virt, buf, size);

	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	dma_free_coherent(NULL, size, buf_virt, buf_phys);

	return ret;
}
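
/*
 * Note on the count above: C0_SETPM copies into SDMA program memory,
 * which (as far as this driver is concerned) is counted in 16-bit
 * instruction words, hence bd0->mode.count = size / 2.
 */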

static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long val;
	u32 chnenbl = chnenbl_ofs(sdma, event);

	val = readl_relaxed(sdma->regs + chnenbl);
	__set_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	u32 chnenbl = chnenbl_ofs(sdma, event);
	unsigned long val;

	val = readl_relaxed(sdma->regs + chnenbl);
	__clear_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int error = 0;
	enum dma_status	old_status = sdmac->status;
	unsigned long flags;

	spin_lock_irqsave(&sdmac->lock, flags);
	if (!sdmac->enabled) {
		spin_unlock_irqrestore(&sdmac->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&sdmac->lock, flags);

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (1) {
		struct sdma_desc *desc = sdmac->desc;

		bd = &desc->bd[desc->buf_tail];

		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR) {
			bd->mode.status &= ~BD_RROR;
			sdmac->status = DMA_ERROR;
			error = -EIO;
		}

		/*
		 * We use bd->mode.count to calculate the residue, since it
		 * contains the number of bytes present in the current buffer
		 * descriptor.
		 */

		desc->chn_real_count = bd->mode.count;
		bd->mode.status |= BD_DONE;
		bd->mode.count = desc->period_len;
		desc->buf_ptail = desc->buf_tail;
		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;

		/*
		 * The callback is called from the interrupt context in order
		 * to reduce latency and to avoid the risk of altering the
		 * SDMA transaction status by the time the client tasklet is
		 * executed.
		 */

		dmaengine_desc_get_callback_invoke(&sdmac->txdesc, NULL);

		if (error)
			sdmac->status = old_status;
	}
}

static void mxc_sdma_handle_channel_normal(unsigned long data)
{
	struct sdma_channel *sdmac = (struct sdma_channel *) data;
	struct sdma_buffer_descriptor *bd;
	int i, error = 0;

	sdmac->desc->chn_real_count = 0;
	/*
	 * non loop mode. Iterate over all descriptors, collect
	 * errors and call callback function
	 */
	for (i = 0; i < sdmac->desc->num_bd; i++) {
		bd = &sdmac->desc->bd[i];

		if (bd->mode.status & (BD_DONE | BD_RROR))
			error = -EIO;
		sdmac->desc->chn_real_count += bd->mode.count;
	}

	if (error)
		sdmac->status = DMA_ERROR;
	else
		sdmac->status = DMA_COMPLETE;

	dma_cookie_complete(&sdmac->txdesc);

	dmaengine_desc_get_callback_invoke(&sdmac->txdesc, NULL);
}

static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	unsigned long stat;

	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
	/* channel 0 is special and not handled here, see run_channel0() */
	stat &= ~1;

	while (stat) {
		int channel = fls(stat) - 1;
		struct sdma_channel *sdmac = &sdma->channel[channel];

		if (sdmac->flags & IMX_DMA_SG_LOOP)
			sdma_update_channel_loop(sdmac);
		else
			tasklet_schedule(&sdmac->tasklet);

		__clear_bit(channel, &stat);
	}

	return IRQ_HANDLED;
}

/*
 * sets the pc of SDMA script according to the peripheral type
 */
static void sdma_get_pc(struct sdma_channel *sdmac,
		enum sdma_peripheral_type peripheral_type)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int per_2_emi = 0, emi_2_per = 0;
	/*
	 * These are needed once we start to support transfers between
	 * two peripherals or memory-to-memory transfers
	 */
	int per_2_per = 0;

	sdmac->pc_from_device = 0;
	sdmac->pc_to_device = 0;
	sdmac->device_to_device = 0;

	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:
		break;
	case IMX_DMATYPE_DSP:
		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
		break;
	case IMX_DMATYPE_FIRI:
		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
		break;
	case IMX_DMATYPE_UART:
		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_UART_SP:
		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ATA:
		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
	case IMX_DMATYPE_SAI:
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_SSI_DUAL:
		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
	case IMX_DMATYPE_SDHC:
	case IMX_DMATYPE_CSPI_SP:
	case IMX_DMATYPE_ESAI:
	case IMX_DMATYPE_MSHC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ASRC:
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_ASRC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_MSHC:
		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
		break;
	case IMX_DMATYPE_CCM:
		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
		break;
	case IMX_DMATYPE_SPDIF:
		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
		break;
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
	default:
		break;
	}

	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
	sdmac->device_to_device = per_2_per;
}
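
/*
 * Example: a shared-peripheral SSI (IMX_DMATYPE_SSI_SP) resolves to the
 * shp_2_mcu/mcu_2_shp pair, i.e. with the built-in imx6q script table
 * above a receive script at PC 891 and a transmit script at PC 960
 * (the addresses may differ once RAM firmware is loaded).
 */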

static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	int ret;
	unsigned long flags;

	if (sdmac->direction == DMA_DEV_TO_MEM)
		load_address = sdmac->pc_from_device;
	else if (sdmac->direction == DMA_DEV_TO_DEV)
		load_address = sdmac->device_to_device;
	else
		load_address = sdmac->pc_to_device;

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/*
	 * Send the event masks, the peripheral base address and the
	 * watermark level to the script by means of the channel context.
	 */
	context->gReg[0] = sdmac->event_mask[1];
	context->gReg[1] = sdmac->event_mask[0];
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	return ret;
}
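
/*
 * sizeof(struct sdma_context_data) is 128 bytes, i.e. 32 words, so the
 * C0_SETDM command above writes the context to SDMA data-space address
 * 2048 + 32 * channel, giving each of the 32 channels its own context
 * slot.
 */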

static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sdma_channel, chan);
}

static int sdma_disable_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long flags;

	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
	sdmac->status = DMA_ERROR;

	spin_lock_irqsave(&sdmac->lock, flags);
	sdmac->enabled = false;
	spin_unlock_irqrestore(&sdmac->lock, flags);

	return 0;
}

static int sdma_disable_channel_with_delay(struct dma_chan *chan)
{
	sdma_disable_channel(chan);

	/*
	 * According to NXP R&D team a delay of one BD SDMA cost time
	 * (maximum is 1ms) should be added after disable of the channel
	 * bit, to ensure SDMA core has really been stopped after SDMA
	 * clients call .device_terminate_all.
	 */
	mdelay(1);

	return 0;
}

static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;

	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;

	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);

	if (sdmac->event_id0 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;

	if (sdmac->event_id1 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;

	/*
	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to swap
	 * LWML and HWML of INFO(A.3.2.5.1), and also swap
	 * r0(event_mask[1]) and r1(event_mask[0]).
	 */
	if (lwml > hwml) {
		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
						SDMA_WATERMARK_LEVEL_HWML);
		sdmac->watermark_level |= hwml;
		sdmac->watermark_level |= lwml << 16;
		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
	}

	if (sdmac->per_address2 >= sdma->spba_start_addr &&
			sdmac->per_address2 <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;

	if (sdmac->per_address >= sdma->spba_start_addr &&
			sdmac->per_address <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;

	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
}
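
/*
 * Worked example for the swap above: with src_maxburst = 8 and
 * dst_maxburst = 4 the incoming word has lwml = 8 and hwml = 4, so the
 * function rewrites it to carry 4 in bits 0-7 and 8 in bits 16-23, and
 * swaps event_mask[0]/event_mask[1] to match.
 */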

static int sdma_config_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	int ret;

	sdma_disable_channel(chan);

	sdmac->event_mask[0] = 0;
	sdmac->event_mask[1] = 0;
	sdmac->shp_addr = 0;
	sdmac->per_addr = 0;

	if (sdmac->event_id0) {
		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id0);
	}

	if (sdmac->event_id1) {
		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id1);
	}

	switch (sdmac->peripheral_type) {
	case IMX_DMATYPE_DSP:
		sdma_config_ownership(sdmac, false, true, true);
		break;
	case IMX_DMATYPE_MEMORY:
		sdma_config_ownership(sdmac, false, true, false);
		break;
	default:
		sdma_config_ownership(sdmac, true, true, false);
		break;
	}

	sdma_get_pc(sdmac, sdmac->peripheral_type);

	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
		/* Handle multiple event channels differently */
		if (sdmac->event_id1) {
			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
				sdma_set_watermarklevel_for_p2p(sdmac);
		} else
			__set_bit(sdmac->event_id0, sdmac->event_mask);

		/* Address */
		sdmac->shp_addr = sdmac->per_address;
		sdmac->per_addr = sdmac->per_address2;
	} else {
		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
	}

	ret = sdma_load_context(sdmac);

	return ret;
}

static int sdma_set_channel_priority(struct sdma_channel *sdmac,
		unsigned int priority)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (priority < MXC_SDMA_MIN_PRIORITY
	    || priority > MXC_SDMA_MAX_PRIORITY) {
		return -EINVAL;
	}

	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);

	return 0;
}

static int sdma_request_channel(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	struct sdma_desc *desc;
	int channel = sdmac->channel;
	int ret = -EBUSY;

	sdmac->desc = &sdmac->_desc;
	desc = sdmac->desc;

	desc->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &desc->bd_phys,
					GFP_KERNEL);
	if (!desc->bd) {
		ret = -ENOMEM;
		goto out;
	}

	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;

	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
	return 0;
out:

	return ret;
}
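
/*
 * The single page allocated above backs the channel's buffer descriptor
 * ring (up to NUM_BD entries); base_bd_ptr and current_bd_ptr in the
 * channel control block tell the SDMA engine where that ring lives.
 */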
prio, ret; 11591ec1e82fSSascha Hauer 11601ec1e82fSSascha Hauer if (!data) 11611ec1e82fSSascha Hauer return -EINVAL; 11621ec1e82fSSascha Hauer 11631ec1e82fSSascha Hauer switch (data->priority) { 11641ec1e82fSSascha Hauer case DMA_PRIO_HIGH: 11651ec1e82fSSascha Hauer prio = 3; 11661ec1e82fSSascha Hauer break; 11671ec1e82fSSascha Hauer case DMA_PRIO_MEDIUM: 11681ec1e82fSSascha Hauer prio = 2; 11691ec1e82fSSascha Hauer break; 11701ec1e82fSSascha Hauer case DMA_PRIO_LOW: 11711ec1e82fSSascha Hauer default: 11721ec1e82fSSascha Hauer prio = 1; 11731ec1e82fSSascha Hauer break; 11741ec1e82fSSascha Hauer } 11751ec1e82fSSascha Hauer 11761ec1e82fSSascha Hauer sdmac->peripheral_type = data->peripheral_type; 11771ec1e82fSSascha Hauer sdmac->event_id0 = data->dma_request; 11788391ecf4SShengjiu Wang sdmac->event_id1 = data->dma_request2; 1179c2c744d3SRichard Zhao 1180b93edcddSFabio Estevam ret = clk_enable(sdmac->sdma->clk_ipg); 1181b93edcddSFabio Estevam if (ret) 1182b93edcddSFabio Estevam return ret; 1183b93edcddSFabio Estevam ret = clk_enable(sdmac->sdma->clk_ahb); 1184b93edcddSFabio Estevam if (ret) 1185b93edcddSFabio Estevam goto disable_clk_ipg; 1186c2c744d3SRichard Zhao 11873bb5e7caSRichard Zhao ret = sdma_request_channel(sdmac); 11881ec1e82fSSascha Hauer if (ret) 1189b93edcddSFabio Estevam goto disable_clk_ahb; 11901ec1e82fSSascha Hauer 11913bb5e7caSRichard Zhao ret = sdma_set_channel_priority(sdmac, prio); 11921ec1e82fSSascha Hauer if (ret) 1193b93edcddSFabio Estevam goto disable_clk_ahb; 11941ec1e82fSSascha Hauer 1195*76c33d27SSascha Hauer dma_async_tx_descriptor_init(&sdmac->txdesc, chan); 1196*76c33d27SSascha Hauer sdmac->txdesc.tx_submit = sdma_tx_submit; 11971ec1e82fSSascha Hauer /* txd.flags will be overwritten in prep funcs */ 1198*76c33d27SSascha Hauer sdmac->txdesc.flags = DMA_CTRL_ACK; 11991ec1e82fSSascha Hauer 12001ec1e82fSSascha Hauer return 0; 1201b93edcddSFabio Estevam 1202b93edcddSFabio Estevam disable_clk_ahb: 1203b93edcddSFabio Estevam clk_disable(sdmac->sdma->clk_ahb); 1204b93edcddSFabio Estevam disable_clk_ipg: 1205b93edcddSFabio Estevam clk_disable(sdmac->sdma->clk_ipg); 1206b93edcddSFabio Estevam return ret; 12071ec1e82fSSascha Hauer } 12081ec1e82fSSascha Hauer 12091ec1e82fSSascha Hauer static void sdma_free_chan_resources(struct dma_chan *chan) 12101ec1e82fSSascha Hauer { 12111ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 12121ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 1213*76c33d27SSascha Hauer struct sdma_desc *desc = sdmac->desc; 12141ec1e82fSSascha Hauer 12157b350ab0SMaxime Ripard sdma_disable_channel(chan); 12161ec1e82fSSascha Hauer 12171ec1e82fSSascha Hauer if (sdmac->event_id0) 12181ec1e82fSSascha Hauer sdma_event_disable(sdmac, sdmac->event_id0); 12191ec1e82fSSascha Hauer if (sdmac->event_id1) 12201ec1e82fSSascha Hauer sdma_event_disable(sdmac, sdmac->event_id1); 12211ec1e82fSSascha Hauer 12221ec1e82fSSascha Hauer sdmac->event_id0 = 0; 12231ec1e82fSSascha Hauer sdmac->event_id1 = 0; 12241ec1e82fSSascha Hauer 12251ec1e82fSSascha Hauer sdma_set_channel_priority(sdmac, 0); 12261ec1e82fSSascha Hauer 1227*76c33d27SSascha Hauer dma_free_coherent(NULL, PAGE_SIZE, desc->bd, desc->bd_phys); 12281ec1e82fSSascha Hauer 12297560e3f3SSascha Hauer clk_disable(sdma->clk_ipg); 12307560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 12311ec1e82fSSascha Hauer } 12321ec1e82fSSascha Hauer 12331ec1e82fSSascha Hauer static struct dma_async_tx_descriptor *sdma_prep_slave_sg( 12341ec1e82fSSascha Hauer struct dma_chan *chan, struct scatterlist *sgl, 
1235db8196dfSVinod Koul unsigned int sg_len, enum dma_transfer_direction direction, 1236185ecb5fSAlexandre Bounine unsigned long flags, void *context) 12371ec1e82fSSascha Hauer { 12381ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 12391ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 12401ec1e82fSSascha Hauer int ret, i, count; 124123889c63SSascha Hauer int channel = sdmac->channel; 12421ec1e82fSSascha Hauer struct scatterlist *sg; 1243*76c33d27SSascha Hauer struct sdma_desc *desc = sdmac->desc; 12441ec1e82fSSascha Hauer 12451ec1e82fSSascha Hauer if (sdmac->status == DMA_IN_PROGRESS) 12461ec1e82fSSascha Hauer return NULL; 12471ec1e82fSSascha Hauer sdmac->status = DMA_IN_PROGRESS; 12481ec1e82fSSascha Hauer 12491ec1e82fSSascha Hauer sdmac->flags = 0; 12501ec1e82fSSascha Hauer 1251*76c33d27SSascha Hauer desc->buf_tail = 0; 1252*76c33d27SSascha Hauer desc->buf_ptail = 0; 1253*76c33d27SSascha Hauer desc->chn_real_count = 0; 12548e2e27c7SRichard Zhao 12551ec1e82fSSascha Hauer dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 12561ec1e82fSSascha Hauer sg_len, channel); 12571ec1e82fSSascha Hauer 12581ec1e82fSSascha Hauer sdmac->direction = direction; 12591ec1e82fSSascha Hauer ret = sdma_load_context(sdmac); 12601ec1e82fSSascha Hauer if (ret) 12611ec1e82fSSascha Hauer goto err_out; 12621ec1e82fSSascha Hauer 12631ec1e82fSSascha Hauer if (sg_len > NUM_BD) { 12641ec1e82fSSascha Hauer dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", 12651ec1e82fSSascha Hauer channel, sg_len, NUM_BD); 12661ec1e82fSSascha Hauer ret = -EINVAL; 12671ec1e82fSSascha Hauer goto err_out; 12681ec1e82fSSascha Hauer } 12691ec1e82fSSascha Hauer 1270*76c33d27SSascha Hauer desc->chn_count = 0; 12711ec1e82fSSascha Hauer for_each_sg(sgl, sg, sg_len, i) { 1272*76c33d27SSascha Hauer struct sdma_buffer_descriptor *bd = &desc->bd[i]; 12731ec1e82fSSascha Hauer int param; 12741ec1e82fSSascha Hauer 1275d2f5c276SAnatolij Gustschin bd->buffer_addr = sg->dma_address; 12761ec1e82fSSascha Hauer 1277fdaf9c4bSLars-Peter Clausen count = sg_dma_len(sg); 12781ec1e82fSSascha Hauer 12791ec1e82fSSascha Hauer if (count > 0xffff) { 12801ec1e82fSSascha Hauer dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", 12811ec1e82fSSascha Hauer channel, count, 0xffff); 12821ec1e82fSSascha Hauer ret = -EINVAL; 12831ec1e82fSSascha Hauer goto err_out; 12841ec1e82fSSascha Hauer } 12851ec1e82fSSascha Hauer 12861ec1e82fSSascha Hauer bd->mode.count = count; 1287*76c33d27SSascha Hauer desc->chn_count += count; 12881ec1e82fSSascha Hauer 12891ec1e82fSSascha Hauer if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { 12901ec1e82fSSascha Hauer ret = -EINVAL; 12911ec1e82fSSascha Hauer goto err_out; 12921ec1e82fSSascha Hauer } 12931fa81c27SSascha Hauer 12941fa81c27SSascha Hauer switch (sdmac->word_size) { 12951fa81c27SSascha Hauer case DMA_SLAVE_BUSWIDTH_4_BYTES: 12961ec1e82fSSascha Hauer bd->mode.command = 0; 12971fa81c27SSascha Hauer if (count & 3 || sg->dma_address & 3) 12981fa81c27SSascha Hauer return NULL; 12991fa81c27SSascha Hauer break; 13001fa81c27SSascha Hauer case DMA_SLAVE_BUSWIDTH_2_BYTES: 13011fa81c27SSascha Hauer bd->mode.command = 2; 13021fa81c27SSascha Hauer if (count & 1 || sg->dma_address & 1) 13031fa81c27SSascha Hauer return NULL; 13041fa81c27SSascha Hauer break; 13051fa81c27SSascha Hauer case DMA_SLAVE_BUSWIDTH_1_BYTE: 13061fa81c27SSascha Hauer bd->mode.command = 1; 13071fa81c27SSascha Hauer break; 13081fa81c27SSascha Hauer default: 13091fa81c27SSascha 
Hauer return NULL; 13101fa81c27SSascha Hauer } 13111ec1e82fSSascha Hauer 13121ec1e82fSSascha Hauer param = BD_DONE | BD_EXTD | BD_CONT; 13131ec1e82fSSascha Hauer 1314341b9419SShawn Guo if (i + 1 == sg_len) { 13151ec1e82fSSascha Hauer param |= BD_INTR; 1316341b9419SShawn Guo param |= BD_LAST; 1317341b9419SShawn Guo param &= ~BD_CONT; 13181ec1e82fSSascha Hauer } 13191ec1e82fSSascha Hauer 1320c3cc74b2SOlof Johansson dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", 1321c3cc74b2SOlof Johansson i, count, (u64)sg->dma_address, 13221ec1e82fSSascha Hauer param & BD_WRAP ? "wrap" : "", 13231ec1e82fSSascha Hauer param & BD_INTR ? " intr" : ""); 13241ec1e82fSSascha Hauer 13251ec1e82fSSascha Hauer bd->mode.status = param; 13261ec1e82fSSascha Hauer } 13271ec1e82fSSascha Hauer 1328*76c33d27SSascha Hauer desc->num_bd = sg_len; 1329*76c33d27SSascha Hauer sdma->channel_control[channel].current_bd_ptr = desc->bd_phys; 13301ec1e82fSSascha Hauer 1331*76c33d27SSascha Hauer return &sdmac->txdesc; 13321ec1e82fSSascha Hauer err_out: 13334b2ce9ddSShawn Guo sdmac->status = DMA_ERROR; 13341ec1e82fSSascha Hauer return NULL; 13351ec1e82fSSascha Hauer } 13361ec1e82fSSascha Hauer 13371ec1e82fSSascha Hauer static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( 13381ec1e82fSSascha Hauer struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 1339185ecb5fSAlexandre Bounine size_t period_len, enum dma_transfer_direction direction, 134031c1e5a1SLaurent Pinchart unsigned long flags) 13411ec1e82fSSascha Hauer { 13421ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 13431ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 13441ec1e82fSSascha Hauer int num_periods = buf_len / period_len; 134523889c63SSascha Hauer int channel = sdmac->channel; 13461ec1e82fSSascha Hauer int ret, i = 0, buf = 0; 1347*76c33d27SSascha Hauer struct sdma_desc *desc = sdmac->desc; 13481ec1e82fSSascha Hauer 13491ec1e82fSSascha Hauer dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); 13501ec1e82fSSascha Hauer 13511ec1e82fSSascha Hauer if (sdmac->status == DMA_IN_PROGRESS) 13521ec1e82fSSascha Hauer return NULL; 13531ec1e82fSSascha Hauer 13541ec1e82fSSascha Hauer sdmac->status = DMA_IN_PROGRESS; 13551ec1e82fSSascha Hauer 1356*76c33d27SSascha Hauer desc->buf_tail = 0; 1357*76c33d27SSascha Hauer desc->buf_ptail = 0; 1358*76c33d27SSascha Hauer desc->chn_real_count = 0; 1359*76c33d27SSascha Hauer desc->period_len = period_len; 13608e2e27c7SRichard Zhao 13611ec1e82fSSascha Hauer sdmac->flags |= IMX_DMA_SG_LOOP; 13621ec1e82fSSascha Hauer sdmac->direction = direction; 13631ec1e82fSSascha Hauer ret = sdma_load_context(sdmac); 13641ec1e82fSSascha Hauer if (ret) 13651ec1e82fSSascha Hauer goto err_out; 13661ec1e82fSSascha Hauer 13671ec1e82fSSascha Hauer if (num_periods > NUM_BD) { 13681ec1e82fSSascha Hauer dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", 13691ec1e82fSSascha Hauer channel, num_periods, NUM_BD); 13701ec1e82fSSascha Hauer goto err_out; 13711ec1e82fSSascha Hauer } 13721ec1e82fSSascha Hauer 13731ec1e82fSSascha Hauer if (period_len > 0xffff) { 1374ba6ab3b3SArvind Yadav dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n", 13751ec1e82fSSascha Hauer channel, period_len, 0xffff); 13761ec1e82fSSascha Hauer goto err_out; 13771ec1e82fSSascha Hauer } 13781ec1e82fSSascha Hauer 13791ec1e82fSSascha Hauer while (buf < buf_len) { 1380*76c33d27SSascha Hauer struct sdma_buffer_descriptor *bd = &desc->bd[i]; 13811ec1e82fSSascha Hauer int param; 
13821ec1e82fSSascha Hauer 13831ec1e82fSSascha Hauer bd->buffer_addr = dma_addr; 13841ec1e82fSSascha Hauer 13851ec1e82fSSascha Hauer bd->mode.count = period_len; 13861ec1e82fSSascha Hauer 13871ec1e82fSSascha Hauer if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) 13881ec1e82fSSascha Hauer goto err_out; 13891ec1e82fSSascha Hauer if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) 13901ec1e82fSSascha Hauer bd->mode.command = 0; 13911ec1e82fSSascha Hauer else 13921ec1e82fSSascha Hauer bd->mode.command = sdmac->word_size; 13931ec1e82fSSascha Hauer 13941ec1e82fSSascha Hauer param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR; 13951ec1e82fSSascha Hauer if (i + 1 == num_periods) 13961ec1e82fSSascha Hauer param |= BD_WRAP; 13971ec1e82fSSascha Hauer 1398ba6ab3b3SArvind Yadav dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n", 1399c3cc74b2SOlof Johansson i, period_len, (u64)dma_addr, 14001ec1e82fSSascha Hauer param & BD_WRAP ? "wrap" : "", 14011ec1e82fSSascha Hauer param & BD_INTR ? " intr" : ""); 14021ec1e82fSSascha Hauer 14031ec1e82fSSascha Hauer bd->mode.status = param; 14041ec1e82fSSascha Hauer 14051ec1e82fSSascha Hauer dma_addr += period_len; 14061ec1e82fSSascha Hauer buf += period_len; 14071ec1e82fSSascha Hauer 14081ec1e82fSSascha Hauer i++; 14091ec1e82fSSascha Hauer } 14101ec1e82fSSascha Hauer 1411*76c33d27SSascha Hauer desc->num_bd = num_periods; 1412*76c33d27SSascha Hauer sdma->channel_control[channel].current_bd_ptr = desc->bd_phys; 14131ec1e82fSSascha Hauer 1414*76c33d27SSascha Hauer return &sdmac->txdesc; 14151ec1e82fSSascha Hauer err_out: 14161ec1e82fSSascha Hauer sdmac->status = DMA_ERROR; 14171ec1e82fSSascha Hauer return NULL; 14181ec1e82fSSascha Hauer } 14191ec1e82fSSascha Hauer 14207b350ab0SMaxime Ripard static int sdma_config(struct dma_chan *chan, 14217b350ab0SMaxime Ripard struct dma_slave_config *dmaengine_cfg) 14221ec1e82fSSascha Hauer { 14231ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 14241ec1e82fSSascha Hauer 1425db8196dfSVinod Koul if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 14261ec1e82fSSascha Hauer sdmac->per_address = dmaengine_cfg->src_addr; 142794ac27a5SPhilippe Rétornaz sdmac->watermark_level = dmaengine_cfg->src_maxburst * 142894ac27a5SPhilippe Rétornaz dmaengine_cfg->src_addr_width; 14291ec1e82fSSascha Hauer sdmac->word_size = dmaengine_cfg->src_addr_width; 14308391ecf4SShengjiu Wang } else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) { 14318391ecf4SShengjiu Wang sdmac->per_address2 = dmaengine_cfg->src_addr; 14328391ecf4SShengjiu Wang sdmac->per_address = dmaengine_cfg->dst_addr; 14338391ecf4SShengjiu Wang sdmac->watermark_level = dmaengine_cfg->src_maxburst & 14348391ecf4SShengjiu Wang SDMA_WATERMARK_LEVEL_LWML; 14358391ecf4SShengjiu Wang sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) & 14368391ecf4SShengjiu Wang SDMA_WATERMARK_LEVEL_HWML; 14378391ecf4SShengjiu Wang sdmac->word_size = dmaengine_cfg->dst_addr_width; 14381ec1e82fSSascha Hauer } else { 14391ec1e82fSSascha Hauer sdmac->per_address = dmaengine_cfg->dst_addr; 144094ac27a5SPhilippe Rétornaz sdmac->watermark_level = dmaengine_cfg->dst_maxburst * 144194ac27a5SPhilippe Rétornaz dmaengine_cfg->dst_addr_width; 14421ec1e82fSSascha Hauer sdmac->word_size = dmaengine_cfg->dst_addr_width; 14431ec1e82fSSascha Hauer } 1444e6966433SHuang Shijie sdmac->direction = dmaengine_cfg->direction; 14457b350ab0SMaxime Ripard return sdma_config_channel(chan); 14461ec1e82fSSascha Hauer } 14471ec1e82fSSascha Hauer 14481ec1e82fSSascha Hauer static enum dma_status 
sdma_tx_status(struct dma_chan *chan, 14491ec1e82fSSascha Hauer dma_cookie_t cookie, 14501ec1e82fSSascha Hauer struct dma_tx_state *txstate) 14511ec1e82fSSascha Hauer { 14521ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 1453*76c33d27SSascha Hauer struct sdma_desc *desc = sdmac->desc; 1454d1a792f3SRussell King - ARM Linux u32 residue; 1455d1a792f3SRussell King - ARM Linux 1456d1a792f3SRussell King - ARM Linux if (sdmac->flags & IMX_DMA_SG_LOOP) 1457*76c33d27SSascha Hauer residue = (desc->num_bd - desc->buf_ptail) * 1458*76c33d27SSascha Hauer desc->period_len - desc->chn_real_count; 1459d1a792f3SRussell King - ARM Linux else 1460*76c33d27SSascha Hauer residue = desc->chn_count - desc->chn_real_count; 14611ec1e82fSSascha Hauer 1462e8e3a790SAndy Shevchenko dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 1463d1a792f3SRussell King - ARM Linux residue); 14641ec1e82fSSascha Hauer 14658a965911SShawn Guo return sdmac->status; 14661ec1e82fSSascha Hauer } 14671ec1e82fSSascha Hauer 14681ec1e82fSSascha Hauer static void sdma_issue_pending(struct dma_chan *chan) 14691ec1e82fSSascha Hauer { 14702b4f130eSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 14712b4f130eSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 14722b4f130eSSascha Hauer 14732b4f130eSSascha Hauer if (sdmac->status == DMA_IN_PROGRESS) 14742b4f130eSSascha Hauer sdma_enable_channel(sdma, sdmac->channel); 14751ec1e82fSSascha Hauer } 14761ec1e82fSSascha Hauer 14775b28aa31SSascha Hauer #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 1478cd72b846SNicolin Chen #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 1479a572460bSFabio Estevam #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41 1480b7d2648aSFabio Estevam #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 42 14815b28aa31SSascha Hauer 14825b28aa31SSascha Hauer static void sdma_add_scripts(struct sdma_engine *sdma, 14835b28aa31SSascha Hauer const struct sdma_script_start_addrs *addr) 14845b28aa31SSascha Hauer { 14855b28aa31SSascha Hauer s32 *addr_arr = (u32 *)addr; 14865b28aa31SSascha Hauer s32 *saddr_arr = (u32 *)sdma->script_addrs; 14875b28aa31SSascha Hauer int i; 14885b28aa31SSascha Hauer 148970dabaedSNicolin Chen /* use the default firmware in ROM if missing external firmware */ 149070dabaedSNicolin Chen if (!sdma->script_number) 149170dabaedSNicolin Chen sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; 149270dabaedSNicolin Chen 1493cd72b846SNicolin Chen for (i = 0; i < sdma->script_number; i++) 14945b28aa31SSascha Hauer if (addr_arr[i] > 0) 14955b28aa31SSascha Hauer saddr_arr[i] = addr_arr[i]; 14965b28aa31SSascha Hauer } 14975b28aa31SSascha Hauer 14987b4b88e0SSascha Hauer static void sdma_load_firmware(const struct firmware *fw, void *context) 14995b28aa31SSascha Hauer { 15007b4b88e0SSascha Hauer struct sdma_engine *sdma = context; 15015b28aa31SSascha Hauer const struct sdma_firmware_header *header; 15025b28aa31SSascha Hauer const struct sdma_script_start_addrs *addr; 15035b28aa31SSascha Hauer unsigned short *ram_code; 15045b28aa31SSascha Hauer 15057b4b88e0SSascha Hauer if (!fw) { 15060f927a11SSascha Hauer dev_info(sdma->dev, "external firmware not found, using ROM firmware\n"); 15070f927a11SSascha Hauer /* In this case we just use the ROM firmware. 
*/ 15087b4b88e0SSascha Hauer return; 15097b4b88e0SSascha Hauer } 15105b28aa31SSascha Hauer 15115b28aa31SSascha Hauer if (fw->size < sizeof(*header)) 15125b28aa31SSascha Hauer goto err_firmware; 15135b28aa31SSascha Hauer 15145b28aa31SSascha Hauer header = (struct sdma_firmware_header *)fw->data; 15155b28aa31SSascha Hauer 15165b28aa31SSascha Hauer if (header->magic != SDMA_FIRMWARE_MAGIC) 15175b28aa31SSascha Hauer goto err_firmware; 15185b28aa31SSascha Hauer if (header->ram_code_start + header->ram_code_size > fw->size) 15195b28aa31SSascha Hauer goto err_firmware; 1520cd72b846SNicolin Chen switch (header->version_major) { 1521cd72b846SNicolin Chen case 1: 1522cd72b846SNicolin Chen sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; 1523cd72b846SNicolin Chen break; 1524cd72b846SNicolin Chen case 2: 1525cd72b846SNicolin Chen sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2; 1526cd72b846SNicolin Chen break; 1527a572460bSFabio Estevam case 3: 1528a572460bSFabio Estevam sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3; 1529a572460bSFabio Estevam break; 1530b7d2648aSFabio Estevam case 4: 1531b7d2648aSFabio Estevam sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4; 1532b7d2648aSFabio Estevam break; 1533cd72b846SNicolin Chen default: 1534cd72b846SNicolin Chen dev_err(sdma->dev, "unknown firmware version\n"); 1535cd72b846SNicolin Chen goto err_firmware; 1536cd72b846SNicolin Chen } 15375b28aa31SSascha Hauer 15385b28aa31SSascha Hauer addr = (void *)header + header->script_addrs_start; 15395b28aa31SSascha Hauer ram_code = (void *)header + header->ram_code_start; 15405b28aa31SSascha Hauer 15417560e3f3SSascha Hauer clk_enable(sdma->clk_ipg); 15427560e3f3SSascha Hauer clk_enable(sdma->clk_ahb); 15435b28aa31SSascha Hauer /* download the RAM image for SDMA */ 15445b28aa31SSascha Hauer sdma_load_script(sdma, ram_code, 15455b28aa31SSascha Hauer header->ram_code_size, 15466866fd3bSSascha Hauer addr->ram_code_start_addr); 15477560e3f3SSascha Hauer clk_disable(sdma->clk_ipg); 15487560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 15495b28aa31SSascha Hauer 15505b28aa31SSascha Hauer sdma_add_scripts(sdma, addr); 15515b28aa31SSascha Hauer 15525b28aa31SSascha Hauer dev_info(sdma->dev, "loaded firmware %d.%d\n", 15535b28aa31SSascha Hauer header->version_major, 15545b28aa31SSascha Hauer header->version_minor); 15555b28aa31SSascha Hauer 15565b28aa31SSascha Hauer err_firmware: 15575b28aa31SSascha Hauer release_firmware(fw); 15587b4b88e0SSascha Hauer } 15597b4b88e0SSascha Hauer 1560d078cd1bSZidan Wang #define EVENT_REMAP_CELLS 3 1561d078cd1bSZidan Wang 156229f493daSJason Liu static int sdma_event_remap(struct sdma_engine *sdma) 1563d078cd1bSZidan Wang { 1564d078cd1bSZidan Wang struct device_node *np = sdma->dev->of_node; 1565d078cd1bSZidan Wang struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); 1566d078cd1bSZidan Wang struct property *event_remap; 1567d078cd1bSZidan Wang struct regmap *gpr; 1568d078cd1bSZidan Wang char propname[] = "fsl,sdma-event-remap"; 1569d078cd1bSZidan Wang u32 reg, val, shift, num_map, i; 1570d078cd1bSZidan Wang int ret = 0; 1571d078cd1bSZidan Wang 1572d078cd1bSZidan Wang if (IS_ERR(np) || IS_ERR(gpr_np)) 1573d078cd1bSZidan Wang goto out; 1574d078cd1bSZidan Wang 1575d078cd1bSZidan Wang event_remap = of_find_property(np, propname, NULL); 1576d078cd1bSZidan Wang num_map = event_remap ? 
(event_remap->length / sizeof(u32)) : 0; 1577d078cd1bSZidan Wang if (!num_map) { 1578ce078af7SFabio Estevam dev_dbg(sdma->dev, "no event needs to be remapped\n"); 1579d078cd1bSZidan Wang goto out; 1580d078cd1bSZidan Wang } else if (num_map % EVENT_REMAP_CELLS) { 1581d078cd1bSZidan Wang dev_err(sdma->dev, "the property %s must modulo %d\n", 1582d078cd1bSZidan Wang propname, EVENT_REMAP_CELLS); 1583d078cd1bSZidan Wang ret = -EINVAL; 1584d078cd1bSZidan Wang goto out; 1585d078cd1bSZidan Wang } 1586d078cd1bSZidan Wang 1587d078cd1bSZidan Wang gpr = syscon_node_to_regmap(gpr_np); 1588d078cd1bSZidan Wang if (IS_ERR(gpr)) { 1589d078cd1bSZidan Wang dev_err(sdma->dev, "failed to get gpr regmap\n"); 1590d078cd1bSZidan Wang ret = PTR_ERR(gpr); 1591d078cd1bSZidan Wang goto out; 1592d078cd1bSZidan Wang } 1593d078cd1bSZidan Wang 1594d078cd1bSZidan Wang for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) { 1595d078cd1bSZidan Wang ret = of_property_read_u32_index(np, propname, i, &reg); 1596d078cd1bSZidan Wang if (ret) { 1597d078cd1bSZidan Wang dev_err(sdma->dev, "failed to read property %s index %d\n", 1598d078cd1bSZidan Wang propname, i); 1599d078cd1bSZidan Wang goto out; 1600d078cd1bSZidan Wang } 1601d078cd1bSZidan Wang 1602d078cd1bSZidan Wang ret = of_property_read_u32_index(np, propname, i + 1, &shift); 1603d078cd1bSZidan Wang if (ret) { 1604d078cd1bSZidan Wang dev_err(sdma->dev, "failed to read property %s index %d\n", 1605d078cd1bSZidan Wang propname, i + 1); 1606d078cd1bSZidan Wang goto out; 1607d078cd1bSZidan Wang } 1608d078cd1bSZidan Wang 1609d078cd1bSZidan Wang ret = of_property_read_u32_index(np, propname, i + 2, &val); 1610d078cd1bSZidan Wang if (ret) { 1611d078cd1bSZidan Wang dev_err(sdma->dev, "failed to read property %s index %d\n", 1612d078cd1bSZidan Wang propname, i + 2); 1613d078cd1bSZidan Wang goto out; 1614d078cd1bSZidan Wang } 1615d078cd1bSZidan Wang 1616d078cd1bSZidan Wang regmap_update_bits(gpr, reg, BIT(shift), val << shift); 1617d078cd1bSZidan Wang } 1618d078cd1bSZidan Wang 1619d078cd1bSZidan Wang out: 1620d078cd1bSZidan Wang if (!IS_ERR(gpr_np)) 1621d078cd1bSZidan Wang of_node_put(gpr_np); 1622d078cd1bSZidan Wang 1623d078cd1bSZidan Wang return ret; 1624d078cd1bSZidan Wang } 1625d078cd1bSZidan Wang 1626fe6cf289SArnd Bergmann static int sdma_get_firmware(struct sdma_engine *sdma, 16277b4b88e0SSascha Hauer const char *fw_name) 16287b4b88e0SSascha Hauer { 16297b4b88e0SSascha Hauer int ret; 16307b4b88e0SSascha Hauer 16317b4b88e0SSascha Hauer ret = request_firmware_nowait(THIS_MODULE, 16327b4b88e0SSascha Hauer FW_ACTION_HOTPLUG, fw_name, sdma->dev, 16337b4b88e0SSascha Hauer GFP_KERNEL, sdma, sdma_load_firmware); 16345b28aa31SSascha Hauer 16355b28aa31SSascha Hauer return ret; 16365b28aa31SSascha Hauer } 16375b28aa31SSascha Hauer 163819bfc772SJingoo Han static int sdma_init(struct sdma_engine *sdma) 16391ec1e82fSSascha Hauer { 16401ec1e82fSSascha Hauer int i, ret; 16411ec1e82fSSascha Hauer dma_addr_t ccb_phys; 16421ec1e82fSSascha Hauer 1643b93edcddSFabio Estevam ret = clk_enable(sdma->clk_ipg); 1644b93edcddSFabio Estevam if (ret) 1645b93edcddSFabio Estevam return ret; 1646b93edcddSFabio Estevam ret = clk_enable(sdma->clk_ahb); 1647b93edcddSFabio Estevam if (ret) 1648b93edcddSFabio Estevam goto disable_clk_ipg; 16491ec1e82fSSascha Hauer 16501ec1e82fSSascha Hauer /* Be sure SDMA has not started yet */ 1651c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); 16521ec1e82fSSascha Hauer 16531ec1e82fSSascha Hauer sdma->channel_control = dma_alloc_coherent(NULL, 16541ec1e82fSSascha Hauer
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + 16551ec1e82fSSascha Hauer sizeof(struct sdma_context_data), 16561ec1e82fSSascha Hauer &ccb_phys, GFP_KERNEL); 16571ec1e82fSSascha Hauer 16581ec1e82fSSascha Hauer if (!sdma->channel_control) { 16591ec1e82fSSascha Hauer ret = -ENOMEM; 16601ec1e82fSSascha Hauer goto err_dma_alloc; 16611ec1e82fSSascha Hauer } 16621ec1e82fSSascha Hauer 16631ec1e82fSSascha Hauer sdma->context = (void *)sdma->channel_control + 16641ec1e82fSSascha Hauer MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 16651ec1e82fSSascha Hauer sdma->context_phys = ccb_phys + 16661ec1e82fSSascha Hauer MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 16671ec1e82fSSascha Hauer 16681ec1e82fSSascha Hauer /* Zero-out the CCB structures array just allocated */ 16691ec1e82fSSascha Hauer memset(sdma->channel_control, 0, 16701ec1e82fSSascha Hauer MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)); 16711ec1e82fSSascha Hauer 16721ec1e82fSSascha Hauer /* disable all channels */ 167317bba72fSSascha Hauer for (i = 0; i < sdma->drvdata->num_events; i++) 1674c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i)); 16751ec1e82fSSascha Hauer 16761ec1e82fSSascha Hauer /* All channels have priority 0 */ 16771ec1e82fSSascha Hauer for (i = 0; i < MAX_DMA_CHANNELS; i++) 1678c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); 16791ec1e82fSSascha Hauer 16801ec1e82fSSascha Hauer ret = sdma_request_channel(&sdma->channel[0]); 16811ec1e82fSSascha Hauer if (ret) 16821ec1e82fSSascha Hauer goto err_dma_alloc; 16831ec1e82fSSascha Hauer 1684*76c33d27SSascha Hauer sdma->bd0 = sdma->channel[0].desc->bd; 1685*76c33d27SSascha Hauer 16861ec1e82fSSascha Hauer sdma_config_ownership(&sdma->channel[0], false, true, false); 16871ec1e82fSSascha Hauer 16881ec1e82fSSascha Hauer /* Set Command Channel (Channel Zero) */ 1689c4b56857SRichard Zhao writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); 16901ec1e82fSSascha Hauer 16911ec1e82fSSascha Hauer /* Set bits of CONFIG register but with static context switching */ 16921ec1e82fSSascha Hauer /* FIXME: Check whether to set ACR bit depending on clock ratios */ 1693c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); 16941ec1e82fSSascha Hauer 1695c4b56857SRichard Zhao writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); 16961ec1e82fSSascha Hauer 16971ec1e82fSSascha Hauer /* Initializes channel's priorities */ 16981ec1e82fSSascha Hauer sdma_set_channel_priority(&sdma->channel[0], 7); 16991ec1e82fSSascha Hauer 17007560e3f3SSascha Hauer clk_disable(sdma->clk_ipg); 17017560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 17021ec1e82fSSascha Hauer 17031ec1e82fSSascha Hauer return 0; 17041ec1e82fSSascha Hauer 17051ec1e82fSSascha Hauer err_dma_alloc: 17067560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 1707b93edcddSFabio Estevam disable_clk_ipg: 1708b93edcddSFabio Estevam clk_disable(sdma->clk_ipg); 17091ec1e82fSSascha Hauer dev_err(sdma->dev, "initialisation failed with %d\n", ret); 17101ec1e82fSSascha Hauer return ret; 17111ec1e82fSSascha Hauer } 17121ec1e82fSSascha Hauer 17139479e17cSShawn Guo static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) 17149479e17cSShawn Guo { 17150b351865SNicolin Chen struct sdma_channel *sdmac = to_sdma_chan(chan); 17169479e17cSShawn Guo struct imx_dma_data *data = fn_param; 17179479e17cSShawn Guo 17189479e17cSShawn Guo if (!imx_dma_is_general_purpose(chan)) 17199479e17cSShawn Guo return false; 17209479e17cSShawn Guo 17210b351865SNicolin Chen sdmac->data = 
*data; 17220b351865SNicolin Chen chan->private = &sdmac->data; 17239479e17cSShawn Guo 17249479e17cSShawn Guo return true; 17259479e17cSShawn Guo } 17269479e17cSShawn Guo 17279479e17cSShawn Guo static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, 17289479e17cSShawn Guo struct of_dma *ofdma) 17299479e17cSShawn Guo { 17309479e17cSShawn Guo struct sdma_engine *sdma = ofdma->of_dma_data; 17319479e17cSShawn Guo dma_cap_mask_t mask = sdma->dma_device.cap_mask; 17329479e17cSShawn Guo struct imx_dma_data data; 17339479e17cSShawn Guo 17349479e17cSShawn Guo if (dma_spec->args_count != 3) 17359479e17cSShawn Guo return NULL; 17369479e17cSShawn Guo 17379479e17cSShawn Guo data.dma_request = dma_spec->args[0]; 17389479e17cSShawn Guo data.peripheral_type = dma_spec->args[1]; 17399479e17cSShawn Guo data.priority = dma_spec->args[2]; 17408391ecf4SShengjiu Wang /* 17418391ecf4SShengjiu Wang * init dma_request2 to zero, which is not used by the dts. 17428391ecf4SShengjiu Wang * For P2P, dma_request2 is init from dma_request_channel(), 17438391ecf4SShengjiu Wang * chan->private will point to the imx_dma_data, and in 17448391ecf4SShengjiu Wang * device_alloc_chan_resources(), imx_dma_data.dma_request2 will 17458391ecf4SShengjiu Wang * be set to sdmac->event_id1. 17468391ecf4SShengjiu Wang */ 17478391ecf4SShengjiu Wang data.dma_request2 = 0; 17489479e17cSShawn Guo 17499479e17cSShawn Guo return dma_request_channel(mask, sdma_filter_fn, &data); 17509479e17cSShawn Guo } 17519479e17cSShawn Guo 1752e34b731fSMark Brown static int sdma_probe(struct platform_device *pdev) 17531ec1e82fSSascha Hauer { 1754580975d7SShawn Guo const struct of_device_id *of_id = 1755580975d7SShawn Guo of_match_device(sdma_dt_ids, &pdev->dev); 1756580975d7SShawn Guo struct device_node *np = pdev->dev.of_node; 17578391ecf4SShengjiu Wang struct device_node *spba_bus; 1758580975d7SShawn Guo const char *fw_name; 17591ec1e82fSSascha Hauer int ret; 17601ec1e82fSSascha Hauer int irq; 17611ec1e82fSSascha Hauer struct resource *iores; 17628391ecf4SShengjiu Wang struct resource spba_res; 1763d4adcc01SJingoo Han struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev); 17641ec1e82fSSascha Hauer int i; 17651ec1e82fSSascha Hauer struct sdma_engine *sdma; 176636e2f21aSSascha Hauer s32 *saddr_arr; 176717bba72fSSascha Hauer const struct sdma_driver_data *drvdata = NULL; 176817bba72fSSascha Hauer 176917bba72fSSascha Hauer if (of_id) 177017bba72fSSascha Hauer drvdata = of_id->data; 177117bba72fSSascha Hauer else if (pdev->id_entry) 177217bba72fSSascha Hauer drvdata = (void *)pdev->id_entry->driver_data; 177317bba72fSSascha Hauer 177417bba72fSSascha Hauer if (!drvdata) { 177517bba72fSSascha Hauer dev_err(&pdev->dev, "unable to find driver data\n"); 177617bba72fSSascha Hauer return -EINVAL; 177717bba72fSSascha Hauer } 17781ec1e82fSSascha Hauer 177942536b9fSPhilippe Retornaz ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 178042536b9fSPhilippe Retornaz if (ret) 178142536b9fSPhilippe Retornaz return ret; 178242536b9fSPhilippe Retornaz 17837f24e0eeSFabio Estevam sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL); 17841ec1e82fSSascha Hauer if (!sdma) 17851ec1e82fSSascha Hauer return -ENOMEM; 17861ec1e82fSSascha Hauer 17872ccaef05SRichard Zhao spin_lock_init(&sdma->channel_0_lock); 178873eab978SSascha Hauer 17891ec1e82fSSascha Hauer sdma->dev = &pdev->dev; 179017bba72fSSascha Hauer sdma->drvdata = drvdata; 17911ec1e82fSSascha Hauer 17921ec1e82fSSascha Hauer irq = platform_get_irq(pdev, 0); 17937f24e0eeSFabio Estevam if 
(irq < 0) 179463c72e02SFabio Estevam return irq; 17951ec1e82fSSascha Hauer 17967f24e0eeSFabio Estevam iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 17977f24e0eeSFabio Estevam sdma->regs = devm_ioremap_resource(&pdev->dev, iores); 17987f24e0eeSFabio Estevam if (IS_ERR(sdma->regs)) 17997f24e0eeSFabio Estevam return PTR_ERR(sdma->regs); 18001ec1e82fSSascha Hauer 18017560e3f3SSascha Hauer sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 18027f24e0eeSFabio Estevam if (IS_ERR(sdma->clk_ipg)) 18037f24e0eeSFabio Estevam return PTR_ERR(sdma->clk_ipg); 18041ec1e82fSSascha Hauer 18057560e3f3SSascha Hauer sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 18067f24e0eeSFabio Estevam if (IS_ERR(sdma->clk_ahb)) 18077f24e0eeSFabio Estevam return PTR_ERR(sdma->clk_ahb); 18087560e3f3SSascha Hauer 1809fb9caf37SArvind Yadav ret = clk_prepare(sdma->clk_ipg); 1810fb9caf37SArvind Yadav if (ret) 1811fb9caf37SArvind Yadav return ret; 1812fb9caf37SArvind Yadav 1813fb9caf37SArvind Yadav ret = clk_prepare(sdma->clk_ahb); 1814fb9caf37SArvind Yadav if (ret) 1815fb9caf37SArvind Yadav goto err_clk; 18167560e3f3SSascha Hauer 18177f24e0eeSFabio Estevam ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma", 18187f24e0eeSFabio Estevam sdma); 18191ec1e82fSSascha Hauer if (ret) 1820fb9caf37SArvind Yadav goto err_irq; 18211ec1e82fSSascha Hauer 18225bb9dbb5SVinod Koul sdma->irq = irq; 18235bb9dbb5SVinod Koul 18245b28aa31SSascha Hauer sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); 1825fb9caf37SArvind Yadav if (!sdma->script_addrs) { 1826fb9caf37SArvind Yadav ret = -ENOMEM; 1827fb9caf37SArvind Yadav goto err_irq; 1828fb9caf37SArvind Yadav } 18291ec1e82fSSascha Hauer 183036e2f21aSSascha Hauer /* initially no scripts available */ 183136e2f21aSSascha Hauer saddr_arr = (s32 *)sdma->script_addrs; 183236e2f21aSSascha Hauer for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) 183336e2f21aSSascha Hauer saddr_arr[i] = -EINVAL; 183436e2f21aSSascha Hauer 18357214a8b1SSascha Hauer dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); 18367214a8b1SSascha Hauer dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); 18377214a8b1SSascha Hauer 18381ec1e82fSSascha Hauer INIT_LIST_HEAD(&sdma->dma_device.channels); 18391ec1e82fSSascha Hauer /* Initialize channel parameters */ 18401ec1e82fSSascha Hauer for (i = 0; i < MAX_DMA_CHANNELS; i++) { 18411ec1e82fSSascha Hauer struct sdma_channel *sdmac = &sdma->channel[i]; 18421ec1e82fSSascha Hauer 18431ec1e82fSSascha Hauer sdmac->sdma = sdma; 18441ec1e82fSSascha Hauer spin_lock_init(&sdmac->lock); 18451ec1e82fSSascha Hauer 18461ec1e82fSSascha Hauer sdmac->chan.device = &sdma->dma_device; 18478ac69546SRussell King - ARM Linux dma_cookie_init(&sdmac->chan); 18481ec1e82fSSascha Hauer sdmac->channel = i; 18491ec1e82fSSascha Hauer 185015f30f51SNandor Han tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal, 1851abd9ccc8SHuang Shijie (unsigned long) sdmac); 185223889c63SSascha Hauer /* 185323889c63SSascha Hauer * Add the channel to the DMAC list. Do not add channel 0 though 185423889c63SSascha Hauer * because we need it internally in the SDMA driver. This also means 185523889c63SSascha Hauer * that channel 0 in dmaengine counting matches sdma channel 1. 
185623889c63SSascha Hauer */ 185723889c63SSascha Hauer if (i) 185823889c63SSascha Hauer list_add_tail(&sdmac->chan.device_node, 185923889c63SSascha Hauer &sdma->dma_device.channels); 18601ec1e82fSSascha Hauer } 18611ec1e82fSSascha Hauer 18625b28aa31SSascha Hauer ret = sdma_init(sdma); 18631ec1e82fSSascha Hauer if (ret) 18641ec1e82fSSascha Hauer goto err_init; 18651ec1e82fSSascha Hauer 1866d078cd1bSZidan Wang ret = sdma_event_remap(sdma); 1867d078cd1bSZidan Wang if (ret) 1868d078cd1bSZidan Wang goto err_init; 1869d078cd1bSZidan Wang 1870dcfec3c0SSascha Hauer if (sdma->drvdata->script_addrs) 1871dcfec3c0SSascha Hauer sdma_add_scripts(sdma, sdma->drvdata->script_addrs); 1872580975d7SShawn Guo if (pdata && pdata->script_addrs) 18735b28aa31SSascha Hauer sdma_add_scripts(sdma, pdata->script_addrs); 18745b28aa31SSascha Hauer 1875580975d7SShawn Guo if (pdata) { 18766d0d7e2dSFabio Estevam ret = sdma_get_firmware(sdma, pdata->fw_name); 18776d0d7e2dSFabio Estevam if (ret) 1878ad1122e5SFabio Estevam dev_warn(&pdev->dev, "failed to get firmware from platform data\n"); 1879580975d7SShawn Guo } else { 1880580975d7SShawn Guo /* 1881580975d7SShawn Guo * Because that device tree does not encode ROM script address, 1882580975d7SShawn Guo * the RAM script in firmware is mandatory for device tree 1883580975d7SShawn Guo * probe, otherwise it fails. 1884580975d7SShawn Guo */ 1885580975d7SShawn Guo ret = of_property_read_string(np, "fsl,sdma-ram-script-name", 1886580975d7SShawn Guo &fw_name); 18876602b0ddSFabio Estevam if (ret) 1888ad1122e5SFabio Estevam dev_warn(&pdev->dev, "failed to get firmware name\n"); 18896602b0ddSFabio Estevam else { 1890580975d7SShawn Guo ret = sdma_get_firmware(sdma, fw_name); 18916602b0ddSFabio Estevam if (ret) 1892ad1122e5SFabio Estevam dev_warn(&pdev->dev, "failed to get firmware from device tree\n"); 1893580975d7SShawn Guo } 1894580975d7SShawn Guo } 18955b28aa31SSascha Hauer 18961ec1e82fSSascha Hauer sdma->dma_device.dev = &pdev->dev; 18971ec1e82fSSascha Hauer 18981ec1e82fSSascha Hauer sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; 18991ec1e82fSSascha Hauer sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources; 19001ec1e82fSSascha Hauer sdma->dma_device.device_tx_status = sdma_tx_status; 19011ec1e82fSSascha Hauer sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; 19021ec1e82fSSascha Hauer sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; 19037b350ab0SMaxime Ripard sdma->dma_device.device_config = sdma_config; 19047f3ff14bSJiada Wang sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; 1905f9d4a398SNicolin Chen sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; 1906f9d4a398SNicolin Chen sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; 1907f9d4a398SNicolin Chen sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; 19086f3125ceSLucas Stach sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 19091ec1e82fSSascha Hauer sdma->dma_device.device_issue_pending = sdma_issue_pending; 1910b9b3f82fSSascha Hauer sdma->dma_device.dev->dma_parms = &sdma->dma_parms; 1911b9b3f82fSSascha Hauer dma_set_max_seg_size(sdma->dma_device.dev, 65535); 19121ec1e82fSSascha Hauer 191323e11811SVignesh Raman platform_set_drvdata(pdev, sdma); 191423e11811SVignesh Raman 19151ec1e82fSSascha Hauer ret = dma_async_device_register(&sdma->dma_device); 19161ec1e82fSSascha Hauer if (ret) { 19171ec1e82fSSascha Hauer dev_err(&pdev->dev, "unable to register\n"); 19181ec1e82fSSascha Hauer goto err_init; 
19191ec1e82fSSascha Hauer } 19201ec1e82fSSascha Hauer 19219479e17cSShawn Guo if (np) { 19229479e17cSShawn Guo ret = of_dma_controller_register(np, sdma_xlate, sdma); 19239479e17cSShawn Guo if (ret) { 19249479e17cSShawn Guo dev_err(&pdev->dev, "failed to register controller\n"); 19259479e17cSShawn Guo goto err_register; 19269479e17cSShawn Guo } 19278391ecf4SShengjiu Wang 19288391ecf4SShengjiu Wang spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus"); 19298391ecf4SShengjiu Wang ret = of_address_to_resource(spba_bus, 0, &spba_res); 19308391ecf4SShengjiu Wang if (!ret) { 19318391ecf4SShengjiu Wang sdma->spba_start_addr = spba_res.start; 19328391ecf4SShengjiu Wang sdma->spba_end_addr = spba_res.end; 19338391ecf4SShengjiu Wang } 19348391ecf4SShengjiu Wang of_node_put(spba_bus); 19359479e17cSShawn Guo } 19369479e17cSShawn Guo 19371ec1e82fSSascha Hauer return 0; 19381ec1e82fSSascha Hauer 19399479e17cSShawn Guo err_register: 19409479e17cSShawn Guo dma_async_device_unregister(&sdma->dma_device); 19411ec1e82fSSascha Hauer err_init: 19421ec1e82fSSascha Hauer kfree(sdma->script_addrs); 1943fb9caf37SArvind Yadav err_irq: 1944fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ahb); 1945fb9caf37SArvind Yadav err_clk: 1946fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ipg); 1947939fd4f0SShawn Guo return ret; 19481ec1e82fSSascha Hauer } 19491ec1e82fSSascha Hauer 19501d1bbd30SMaxin B. John static int sdma_remove(struct platform_device *pdev) 19511ec1e82fSSascha Hauer { 195223e11811SVignesh Raman struct sdma_engine *sdma = platform_get_drvdata(pdev); 1953c12fe497SVignesh Raman int i; 195423e11811SVignesh Raman 19555bb9dbb5SVinod Koul devm_free_irq(&pdev->dev, sdma->irq, sdma); 195623e11811SVignesh Raman dma_async_device_unregister(&sdma->dma_device); 195723e11811SVignesh Raman kfree(sdma->script_addrs); 1958fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ahb); 1959fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ipg); 1960c12fe497SVignesh Raman /* Kill the tasklet */ 1961c12fe497SVignesh Raman for (i = 0; i < MAX_DMA_CHANNELS; i++) { 1962c12fe497SVignesh Raman struct sdma_channel *sdmac = &sdma->channel[i]; 1963c12fe497SVignesh Raman 1964c12fe497SVignesh Raman tasklet_kill(&sdmac->tasklet); 1965c12fe497SVignesh Raman } 196623e11811SVignesh Raman 196723e11811SVignesh Raman platform_set_drvdata(pdev, NULL); 196823e11811SVignesh Raman return 0; 19691ec1e82fSSascha Hauer } 19701ec1e82fSSascha Hauer 19711ec1e82fSSascha Hauer static struct platform_driver sdma_driver = { 19721ec1e82fSSascha Hauer .driver = { 19731ec1e82fSSascha Hauer .name = "imx-sdma", 1974580975d7SShawn Guo .of_match_table = sdma_dt_ids, 19751ec1e82fSSascha Hauer }, 197662550cd7SShawn Guo .id_table = sdma_devtypes, 19771d1bbd30SMaxin B. John .remove = sdma_remove, 197823e11811SVignesh Raman .probe = sdma_probe, 19791ec1e82fSSascha Hauer }; 19801ec1e82fSSascha Hauer 198123e11811SVignesh Raman module_platform_driver(sdma_driver); 19821ec1e82fSSascha Hauer 19831ec1e82fSSascha Hauer MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); 19841ec1e82fSSascha Hauer MODULE_DESCRIPTION("i.MX SDMA driver"); 1985c0879342SNicolas Chauvet #if IS_ENABLED(CONFIG_SOC_IMX6Q) 1986c0879342SNicolas Chauvet MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin"); 1987c0879342SNicolas Chauvet #endif 1988c0879342SNicolas Chauvet #if IS_ENABLED(CONFIG_SOC_IMX7D) 1989c0879342SNicolas Chauvet MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin"); 1990c0879342SNicolas Chauvet #endif 19911ec1e82fSSascha Hauer MODULE_LICENSE("GPL"); 1992
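For reference, a minimal, illustrative sketch of how a client driver would reach the callbacks registered above through the generic dmaengine API: dmaengine_slave_config() ends up in sdma_config(), dmaengine_prep_dma_cyclic() in sdma_prep_dma_cyclic(), dmaengine_submit() in sdma_tx_submit(), and dma_async_issue_pending() in sdma_issue_pending(). The channel name "rx", the FIFO address and the buffer geometry below are hypothetical placeholders, not values taken from this driver.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Illustrative sketch only, not part of imx-sdma.c */
static int example_start_cyclic_rx(struct device *dev, dma_addr_t fifo_addr,
				   dma_addr_t buf, size_t buf_len,
				   size_t period_len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.src_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	/* Resolved through sdma_xlate() from the "dmas"/"dma-names" properties */
	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return -ENODEV;

	ret = dmaengine_slave_config(chan, &cfg);	/* -> sdma_config() */
	if (ret)
		goto release;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {				/* -> sdma_prep_dma_cyclic() */
		ret = -EINVAL;
		goto release;
	}

	dmaengine_submit(desc);			/* -> sdma_tx_submit() */
	dma_async_issue_pending(chan);		/* -> sdma_issue_pending() */
	return 0;

release:
	dma_release_channel(chan);
	return ret;
}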