// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-sdma.c
//
// This file contains a driver for the Freescale Smart DMA engine
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
//
// Based on code from Freescale:
//
// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.

#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* SDMA registers */
#define SDMA_H_C0PTR		0x000
#define SDMA_H_INTR		0x004
#define SDMA_H_STATSTOP		0x008
#define SDMA_H_START		0x00c
#define SDMA_H_EVTOVR		0x010
#define SDMA_H_DSPOVR		0x014
#define SDMA_H_HOSTOVR		0x018
#define SDMA_H_EVTPEND		0x01c
#define SDMA_H_DSPENBL		0x020
#define SDMA_H_RESET		0x024
#define SDMA_H_EVTERR		0x028
#define SDMA_H_INTRMSK		0x02c
#define SDMA_H_PSW		0x030
#define SDMA_H_EVTERRDBG	0x034
#define SDMA_H_CONFIG		0x038
#define SDMA_ONCE_ENB		0x040
#define SDMA_ONCE_DATA		0x044
#define SDMA_ONCE_INSTR		0x048
#define SDMA_ONCE_STAT		0x04c
#define SDMA_ONCE_CMD		0x050
#define SDMA_EVT_MIRROR		0x054
#define SDMA_ILLINSTADDR	0x058
#define SDMA_CHN0ADDR		0x05c
#define SDMA_ONCE_RTB		0x060
#define SDMA_XTRIG_CONF1	0x070
#define SDMA_XTRIG_CONF2	0x074
#define SDMA_CHNENBL0_IMX35	0x200
#define SDMA_CHNENBL0_IMX31	0x080
#define SDMA_CHNPRI_0		0x100

/*
 * Buffer descriptor status values.
 */
#define BD_DONE  0x01
#define BD_WRAP  0x02
#define BD_CONT  0x04
#define BD_INTR  0x08
#define BD_RROR  0x10
#define BD_LAST  0x20
#define BD_EXTD  0x80

/*
 * Data Node descriptor status values.
 */
#define DND_END_OF_FRAME  0x80
#define DND_END_OF_XFER	  0x40
#define DND_DONE          0x20
#define DND_UNUSED        0x01

/*
 * IPCV2 descriptor status values.
 */
#define BD_IPCV2_END_OF_FRAME  0x40

#define IPCV2_MAX_NODES        50
/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error
 */
#define DATA_ERROR  0x10000000

/*
 * Buffer descriptor commands.
 */
#define C0_ADDR             0x01
#define C0_LOAD             0x02
#define C0_DUMP             0x03
#define C0_SETCTX           0x07
#define C0_GETCTX           0x03
#define C0_SETDM            0x01
#define C0_SETPM            0x04
#define C0_GETDM            0x02
#define C0_GETPM            0x08
/*
 * Change endianness indicator in the BD command field
 */
#define CHANGE_ENDIANNESS   0x80

/*
 *  p_2_p watermark_level description
 *	Bits		Name			Description
 *	0-7		Lower WML		Lower watermark level
 *	8		PS			1: Pad Swallowing
 *						0: No Pad Swallowing
 *	9		PA			1: Pad Adding
 *						0: No Pad Adding
 *	10		SPDIF			If this bit is set both source
 *						and destination are on SPBA
 *	11		Source Bit(SP)		1: Source on SPBA
 *						0: Source on AIPS
 *	12		Destination Bit(DP)	1: Destination on SPBA
 *						0: Destination on AIPS
 *	13-15		---------		MUST BE 0
 *	16-23		Higher WML		HWML
 *	24-27		N			Total number of samples after
 *						which Pad adding/Swallowing
 *						must be done. It must be odd.
 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 *						LWML event mask
 *						0: LWE in EVENTS register
 *						1: LWE in EVENTS2 register
 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 *						HWML event mask
 *						0: HWE in EVENTS register
 *						1: HWE in EVENTS2 register
 *	30		---------		MUST BE 0
 *	31		CONT			1: Amount of samples to be
 *						transferred is unknown and
 *						script will keep on
 *						transferring samples as long as
 *						both events are detected and
 *						script must be manually stopped
 *						by the application
 *						0: The amount of samples to be
 *						transferred is equal to the
 *						count field of mode word
 */
#define SDMA_WATERMARK_LEVEL_LWML	0xFF
#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)

#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
				 BIT(DMA_MEM_TO_DEV) | \
				 BIT(DMA_DEV_TO_DEV))
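/*
 * Illustrative example (editorial sketch, not used verbatim by the driver):
 * following the bit layout above, the low byte carries the lower watermark
 * and bits 16-23 the higher one, so a device-to-device configuration with
 * src_maxburst = 4 and dst_maxburst = 8 would roughly compose as
 *
 *	wml = 4 | (8 << 16) | SDMA_WATERMARK_LEVEL_CONT;
 *
 * sdma_set_watermarklevel_for_p2p() below then adds the SP/DP and LWE/HWE
 * bits as needed and swaps LWML/HWML when the lower value is the larger one.
 */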
/*
 * Mode/Count of data node descriptors - IPCv2
 */
struct sdma_mode_count {
	u32 count   : 16; /* size of the buffer pointed by this BD */
	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
	u32 command :  8; /* command mostly used for channel 0 */
};

/*
 * Buffer descriptor
 */
struct sdma_buffer_descriptor {
	struct sdma_mode_count  mode;
	u32 buffer_addr;	/* address of the buffer described */
	u32 ext_buffer_addr;	/* extended buffer address */
} __attribute__ ((packed));

/**
 * struct sdma_channel_control - Channel control Block
 *
 * @current_bd_ptr	current buffer descriptor processed
 * @base_bd_ptr		first element of buffer descriptor array
 * @unused		padding. The SDMA engine expects an array of 128 byte
 *			control blocks
 */
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));

/**
 * struct sdma_state_registers - SDMA context for a channel
 *
 * @pc:		program counter
 * @t:		test bit: status of arithmetic & test instruction
 * @rpc:	return program counter
 * @sf:		source fault while loading data
 * @spc:	loop start program counter
 * @df:		destination fault while storing data
 * @epc:	loop end program counter
 * @lm:		loop mode
 */
struct sdma_state_registers {
	u32 pc     :14;
	u32 unused1: 1;
	u32 t      : 1;
	u32 rpc    :14;
	u32 unused0: 1;
	u32 sf     : 1;
	u32 spc    :14;
	u32 unused2: 1;
	u32 df     : 1;
	u32 epc    :14;
	u32 lm     : 2;
} __attribute__ ((packed));

/**
 * struct sdma_context_data - sdma context specific to a channel
 *
 * @channel_state:	channel state bits
 * @gReg:		general registers
 * @mda:		burst dma destination address register
 * @msa:		burst dma source address register
 * @ms:			burst dma status register
 * @md:			burst dma data register
 * @pda:		peripheral dma destination address register
 * @psa:		peripheral dma source address register
 * @ps:			peripheral dma status register
 * @pd:			peripheral dma data register
 * @ca:			CRC polynomial register
 * @cs:			CRC accumulator register
 * @dda:		dedicated core destination address register
 * @dsa:		dedicated core source address register
 * @ds:			dedicated core status register
 * @dd:			dedicated core data register
 */
struct sdma_context_data {
	struct sdma_state_registers  channel_state;
	u32  gReg[8];
	u32  mda;
	u32  msa;
	u32  ms;
	u32  md;
	u32  pda;
	u32  psa;
	u32  ps;
	u32  pd;
	u32  ca;
	u32  cs;
	u32  dda;
	u32  dsa;
	u32  ds;
	u32  dd;
	u32  scratch0;
	u32  scratch1;
	u32  scratch2;
	u32  scratch3;
	u32  scratch4;
	u32  scratch5;
	u32  scratch6;
	u32  scratch7;
} __attribute__ ((packed));

#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))

struct sdma_engine;

/**
 * struct sdma_desc - descriptor structure for one transfer
 * @vd			descriptor for virt dma
 * @num_bd		number of buffer descriptors currently handled (max NUM_BD)
 * @buf_tail		ID of the buffer that was processed
 * @buf_ptail		ID of the previous buffer that was processed
 * @period_len		period length, used in cyclic.
 * @chn_real_count	the real count updated from bd->mode.count
 * @chn_count		the transfer count set up
 * @sdmac		sdma_channel pointer
 * @bd			pointer of allocated bd
 */
struct sdma_desc {
	struct virt_dma_desc	vd;
	unsigned int		num_bd;
	dma_addr_t		bd_phys;
	unsigned int		buf_tail;
	unsigned int		buf_ptail;
	unsigned int		period_len;
	unsigned int		chn_real_count;
	unsigned int		chn_count;
	struct sdma_channel	*sdmac;
	struct sdma_buffer_descriptor *bd;
};

/**
 * struct sdma_channel - housekeeping for a SDMA channel
 *
 * @sdma		pointer to the SDMA engine for this channel
 * @channel		the channel number, matches dmaengine chan_id + 1
 * @direction		transfer type. Needed for setting SDMA script
 * @peripheral_type	Peripheral type. Needed for setting SDMA script
 * @event_id0		aka dma request line
 * @event_id1		for channels that use 2 events
 * @word_size		peripheral access size
 */
struct sdma_channel {
	struct virt_dma_chan		vc;
	struct sdma_desc		*desc;
	struct sdma_engine		*sdma;
	unsigned int			channel;
	enum dma_transfer_direction	direction;
	enum sdma_peripheral_type	peripheral_type;
	unsigned int			event_id0;
	unsigned int			event_id1;
	enum dma_slave_buswidth		word_size;
	unsigned int			pc_from_device, pc_to_device;
	unsigned int			device_to_device;
	unsigned long			flags;
	dma_addr_t			per_address, per_address2;
	unsigned long			event_mask[2];
	unsigned long			watermark_level;
	u32				shp_addr, per_addr;
	spinlock_t			lock;
	enum dma_status			status;
	struct imx_dma_data		data;
	bool				enabled;
};

#define IMX_DMA_SG_LOOP		BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

#define SDMA_FIRMWARE_MAGIC 0x414d4453

/**
 * struct sdma_firmware_header - Layout of the firmware image
 *
 * @magic		"SDMA"
 * @version_major	increased whenever layout of struct sdma_script_start_addrs
 *			changes.
 * @version_minor	firmware minor version (for binary compatible changes)
 * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs	Number of script addresses in this image
 * @ram_code_start	offset of SDMA ram image in this firmware image
 * @ram_code_size	size of SDMA ram image
 * @script_addrs	Stores the start address of the SDMA scripts
 *			(in SDMA memory space)
 */
struct sdma_firmware_header {
	u32	magic;
	u32	version_major;
	u32	version_minor;
	u32	script_addrs_start;
	u32	num_script_addrs;
	u32	ram_code_start;
	u32	ram_code_size;
};

struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs	*script_addrs;
};

struct sdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct sdma_channel		channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control	*channel_control;
	void __iomem			*regs;
	struct sdma_context_data	*context;
	dma_addr_t			context_phys;
	struct dma_device		dma_device;
	struct clk			*clk_ipg;
	struct clk			*clk_ahb;
	spinlock_t			channel_0_lock;
	u32				script_number;
	struct sdma_script_start_addrs	*script_addrs;
	const struct sdma_driver_data	*drvdata;
	u32				spba_start_addr;
	u32				spba_end_addr;
	unsigned int			irq;
	dma_addr_t			bd0_phys;
	struct sdma_buffer_descriptor	*bd0;
};

static struct sdma_driver_data sdma_imx31 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX31,
	.num_events = 32,
};

static struct sdma_script_start_addrs sdma_script_imx25 = {
	.ap_2_ap_addr = 729,
	.uart_2_mcu_addr = 904,
	.per_2_app_addr = 1255,
	.mcu_2_app_addr = 834,
	.uartsh_2_mcu_addr = 1120,
	.per_2_shp_addr = 1329,
	.mcu_2_shp_addr = 1048,
	.ata_2_mcu_addr = 1560,
	.mcu_2_ata_addr = 1479,
	.app_2_per_addr = 1189,
	.app_2_mcu_addr = 770,
	.shp_2_per_addr = 1407,
	.shp_2_mcu_addr = 979,
};

static struct sdma_driver_data sdma_imx25 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx25,
};

static struct sdma_driver_data sdma_imx35 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
};

static struct sdma_script_start_addrs sdma_script_imx51 = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_driver_data sdma_imx51 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx51,
};

static struct sdma_script_start_addrs sdma_script_imx53 = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_driver_data sdma_imx53 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx53,
};

static struct sdma_script_start_addrs sdma_script_imx6q = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.per_2_per_addr = 6331,
	.uartsh_2_mcu_addr = 1032,
	.mcu_2_shp_addr = 960,
	.app_2_mcu_addr = 683,
	.shp_2_mcu_addr = 891,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
};

static struct sdma_driver_data sdma_imx6q = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx6q,
};

static struct sdma_script_start_addrs sdma_script_imx7d = {
	.ap_2_ap_addr = 644,
	.uart_2_mcu_addr = 819,
	.mcu_2_app_addr = 749,
	.uartsh_2_mcu_addr = 1034,
	.mcu_2_shp_addr = 962,
	.app_2_mcu_addr = 685,
	.shp_2_mcu_addr = 893,
	.spdif_2_mcu_addr = 1102,
	.mcu_2_spdif_addr = 1136,
};

static struct sdma_driver_data sdma_imx7d = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
};

static const struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx25-sdma",
		.driver_data = (unsigned long)&sdma_imx25,
	}, {
		.name = "imx31-sdma",
		.driver_data = (unsigned long)&sdma_imx31,
	}, {
		.name = "imx35-sdma",
		.driver_data = (unsigned long)&sdma_imx35,
	}, {
		.name = "imx51-sdma",
		.driver_data = (unsigned long)&sdma_imx51,
	}, {
		.name = "imx53-sdma",
		.driver_data = (unsigned long)&sdma_imx53,
	}, {
		.name = "imx6q-sdma",
		.driver_data = (unsigned long)&sdma_imx6q,
	}, {
		.name = "imx7d-sdma",
		.driver_data = (unsigned long)&sdma_imx7d,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);

static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);

#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq / core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM	(3)	/* indicates which context switch mode is selected */

static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
	u32 chnenbl0 = sdma->drvdata->chnenbl0;
	return chnenbl0 + event * 4;
}

static int sdma_config_ownership(struct sdma_channel *sdmac,
		bool event_override, bool mcu_override, bool dsp_override)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long evt, mcu, dsp;

	if (event_override && mcu_override && dsp_override)
		return -EINVAL;

	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);

	if (dsp_override)
		__clear_bit(channel, &dsp);
	else
		__set_bit(channel, &dsp);

	if (event_override)
		__clear_bit(channel, &evt);
	else
		__set_bit(channel, &evt);

	if (mcu_override)
		__clear_bit(channel, &mcu);
	else
		__set_bit(channel, &mcu);

	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);

	return 0;
}

static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
	unsigned long flags;
	struct sdma_channel *sdmac = &sdma->channel[channel];

	writel(BIT(channel), sdma->regs + SDMA_H_START);

	spin_lock_irqsave(&sdmac->lock, flags);
	sdmac->enabled = true;
	spin_unlock_irqrestore(&sdmac->lock, flags);
}

/*
 * sdma_run_channel0 - run a channel and wait till it's done
 */
static int sdma_run_channel0(struct sdma_engine *sdma)
{
	int ret;
	u32 reg;

	sdma_enable_channel(sdma, 0);

	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
						reg, !(reg & 1), 1, 500);
	if (ret)
		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");

	/* Set bits of CONFIG register with dynamic context switching */
	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	return ret;
}

static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
		u32 address)
{
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	void *buf_virt;
	dma_addr_t buf_phys;
	int ret;
	unsigned long flags;

	buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL);
	if (!buf_virt) {
		return -ENOMEM;
	}

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = size / 2;
	bd0->buffer_addr = buf_phys;
	bd0->ext_buffer_addr = address;

	memcpy(buf_virt, buf, size);

	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	dma_free_coherent(NULL, size, buf_virt, buf_phys);

	return ret;
}

static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long val;
	u32 chnenbl = chnenbl_ofs(sdma, event);

	val = readl_relaxed(sdma->regs + chnenbl);
	__set_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	u32 chnenbl = chnenbl_ofs(sdma, event);
	unsigned long val;

	val = readl_relaxed(sdma->regs + chnenbl);
	__clear_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct sdma_desc, vd.tx);
}

static void sdma_start_desc(struct sdma_channel *sdmac)
{
	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
	struct sdma_desc *desc;
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (!vd) {
		sdmac->desc = NULL;
		return;
	}
	sdmac->desc = desc = to_sdma_desc(&vd->tx);
	/*
	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
	 * the desc alloced will never be freed in vchan_dma_desc_free_list
	 */
	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
		list_del(&vd->node);

	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
	sdma_enable_channel(sdma, sdmac->channel);
}
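/*
 * Editorial summary of the descriptor flow (derived from the code in this
 * file, not additional functionality): with the virt-dma conversion,
 * descriptors are queued on the virtual channel and sdma_start_desc() is
 * called under vc.lock to point the channel control block at the next
 * descriptor's buffer descriptors and kick the channel. For non-cyclic
 * transfers, completion is reported from sdma_int_handler() via
 * vchan_cookie_complete(); cyclic descriptors are kept on the issued list
 * so they can later be collected and freed by vchan_dma_desc_free_list().
 */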

static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int error = 0;
	enum dma_status	old_status = sdmac->status;
	unsigned long flags;

	spin_lock_irqsave(&sdmac->lock, flags);
	if (!sdmac->enabled) {
		spin_unlock_irqrestore(&sdmac->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&sdmac->lock, flags);

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (sdmac->desc) {
		struct sdma_desc *desc = sdmac->desc;

		bd = &desc->bd[desc->buf_tail];

		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR) {
			bd->mode.status &= ~BD_RROR;
			sdmac->status = DMA_ERROR;
			error = -EIO;
		}

		/*
		 * We use bd->mode.count to calculate the residue, since it
		 * contains the number of bytes present in the current buffer
		 * descriptor.
		 */

		desc->chn_real_count = bd->mode.count;
		bd->mode.status |= BD_DONE;
		bd->mode.count = desc->period_len;
		desc->buf_ptail = desc->buf_tail;
		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;

		/*
		 * The callback is called from the interrupt context in order
		 * to reduce latency and to avoid the risk of altering the
		 * SDMA transaction status by the time the client tasklet is
		 * executed.
		 */
		spin_unlock(&sdmac->vc.lock);
		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
		spin_lock(&sdmac->vc.lock);

		if (error)
			sdmac->status = old_status;
	}
}

static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
{
	struct sdma_channel *sdmac = (struct sdma_channel *) data;
	struct sdma_buffer_descriptor *bd;
	int i, error = 0;

	sdmac->desc->chn_real_count = 0;
	/*
	 * non loop mode. Iterate over all descriptors, collect
	 * errors and call callback function
	 */
	for (i = 0; i < sdmac->desc->num_bd; i++) {
		bd = &sdmac->desc->bd[i];

		if (bd->mode.status & (BD_DONE | BD_RROR))
			error = -EIO;
		sdmac->desc->chn_real_count += bd->mode.count;
	}

	if (error)
		sdmac->status = DMA_ERROR;
	else
		sdmac->status = DMA_COMPLETE;
}

static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	unsigned long stat;

	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
	/* channel 0 is special and not handled here, see run_channel0() */
	stat &= ~1;

	while (stat) {
		int channel = fls(stat) - 1;
		struct sdma_channel *sdmac = &sdma->channel[channel];
		struct sdma_desc *desc;

		spin_lock(&sdmac->vc.lock);
		desc = sdmac->desc;
		if (desc) {
			if (sdmac->flags & IMX_DMA_SG_LOOP) {
				sdma_update_channel_loop(sdmac);
			} else {
				mxc_sdma_handle_channel_normal(sdmac);
				vchan_cookie_complete(&desc->vd);
				sdma_start_desc(sdmac);
			}
		}

		spin_unlock(&sdmac->vc.lock);
		__clear_bit(channel, &stat);
	}

	return IRQ_HANDLED;
}

/*
 * sets the pc of SDMA script according to the peripheral type
 */
static void sdma_get_pc(struct sdma_channel *sdmac,
		enum sdma_peripheral_type peripheral_type)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int per_2_emi = 0, emi_2_per = 0;
	/*
	 * These are needed once we start to support transfers between
	 * two peripherals or memory-to-memory transfers
	 */
	int per_2_per = 0;

	sdmac->pc_from_device = 0;
	sdmac->pc_to_device = 0;
	sdmac->device_to_device = 0;

	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:
		break;
	case IMX_DMATYPE_DSP:
		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
		break;
	case IMX_DMATYPE_FIRI:
		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
		break;
	case IMX_DMATYPE_UART:
		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_UART_SP:
		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ATA:
		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
	case IMX_DMATYPE_SAI:
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_SSI_DUAL:
		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
	case IMX_DMATYPE_SDHC:
	case IMX_DMATYPE_CSPI_SP:
	case IMX_DMATYPE_ESAI:
	case IMX_DMATYPE_MSHC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ASRC:
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_ASRC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_MSHC:
		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
		break;
	case IMX_DMATYPE_CCM:
		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
		break;
	case IMX_DMATYPE_SPDIF:
		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
		break;
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
	default:
		break;
	}

	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
	sdmac->device_to_device = per_2_per;
}

static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	int ret;
	unsigned long flags;

	if (sdmac->direction == DMA_DEV_TO_MEM)
		load_address = sdmac->pc_from_device;
	else if (sdmac->direction == DMA_DEV_TO_DEV)
		load_address = sdmac->device_to_device;
	else
		load_address = sdmac->pc_to_device;

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/* Send by context the event mask, base address for peripheral
	 * and watermark level
	 */
	context->gReg[0] = sdmac->event_mask[1];
	context->gReg[1] = sdmac->event_mask[0];
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	return ret;
}
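/*
 * Editorial note on the channel 0 command above: C0_SETDM copies the
 * prepared context from sdma->context_phys into SDMA data memory, with
 * count and destination expressed in 32-bit words. As computed here,
 * channel n's context is placed at word offset 2048 + n * sizeof(context)/4
 * in SDMA RAM; the exact base is taken from the code above, not from an
 * independent hardware reference.
 */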
10337f3ff14bSJiada Wang */ 10347f3ff14bSJiada Wang mdelay(1); 10357f3ff14bSJiada Wang 10367f3ff14bSJiada Wang return 0; 10377f3ff14bSJiada Wang } 10387f3ff14bSJiada Wang 10398391ecf4SShengjiu Wang static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) 10408391ecf4SShengjiu Wang { 10418391ecf4SShengjiu Wang struct sdma_engine *sdma = sdmac->sdma; 10428391ecf4SShengjiu Wang 10438391ecf4SShengjiu Wang int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML; 10448391ecf4SShengjiu Wang int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16; 10458391ecf4SShengjiu Wang 10468391ecf4SShengjiu Wang set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]); 10478391ecf4SShengjiu Wang set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]); 10488391ecf4SShengjiu Wang 10498391ecf4SShengjiu Wang if (sdmac->event_id0 > 31) 10508391ecf4SShengjiu Wang sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE; 10518391ecf4SShengjiu Wang 10528391ecf4SShengjiu Wang if (sdmac->event_id1 > 31) 10538391ecf4SShengjiu Wang sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE; 10548391ecf4SShengjiu Wang 10558391ecf4SShengjiu Wang /* 10568391ecf4SShengjiu Wang * If LWML(src_maxburst) > HWML(dst_maxburst), we need 10578391ecf4SShengjiu Wang * swap LWML and HWML of INFO(A.3.2.5.1), also need swap 10588391ecf4SShengjiu Wang * r0(event_mask[1]) and r1(event_mask[0]). 10598391ecf4SShengjiu Wang */ 10608391ecf4SShengjiu Wang if (lwml > hwml) { 10618391ecf4SShengjiu Wang sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML | 10628391ecf4SShengjiu Wang SDMA_WATERMARK_LEVEL_HWML); 10638391ecf4SShengjiu Wang sdmac->watermark_level |= hwml; 10648391ecf4SShengjiu Wang sdmac->watermark_level |= lwml << 16; 10658391ecf4SShengjiu Wang swap(sdmac->event_mask[0], sdmac->event_mask[1]); 10668391ecf4SShengjiu Wang } 10678391ecf4SShengjiu Wang 10688391ecf4SShengjiu Wang if (sdmac->per_address2 >= sdma->spba_start_addr && 10698391ecf4SShengjiu Wang sdmac->per_address2 <= sdma->spba_end_addr) 10708391ecf4SShengjiu Wang sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP; 10718391ecf4SShengjiu Wang 10728391ecf4SShengjiu Wang if (sdmac->per_address >= sdma->spba_start_addr && 10738391ecf4SShengjiu Wang sdmac->per_address <= sdma->spba_end_addr) 10748391ecf4SShengjiu Wang sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP; 10758391ecf4SShengjiu Wang 10768391ecf4SShengjiu Wang sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT; 10778391ecf4SShengjiu Wang } 10788391ecf4SShengjiu Wang 10797b350ab0SMaxime Ripard static int sdma_config_channel(struct dma_chan *chan) 10801ec1e82fSSascha Hauer { 10817b350ab0SMaxime Ripard struct sdma_channel *sdmac = to_sdma_chan(chan); 10821ec1e82fSSascha Hauer int ret; 10831ec1e82fSSascha Hauer 10847b350ab0SMaxime Ripard sdma_disable_channel(chan); 10851ec1e82fSSascha Hauer 10860bbc1413SRichard Zhao sdmac->event_mask[0] = 0; 10870bbc1413SRichard Zhao sdmac->event_mask[1] = 0; 10881ec1e82fSSascha Hauer sdmac->shp_addr = 0; 10891ec1e82fSSascha Hauer sdmac->per_addr = 0; 10901ec1e82fSSascha Hauer 10911ec1e82fSSascha Hauer if (sdmac->event_id0) { 109217bba72fSSascha Hauer if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) 10931ec1e82fSSascha Hauer return -EINVAL; 10941ec1e82fSSascha Hauer sdma_event_enable(sdmac, sdmac->event_id0); 10951ec1e82fSSascha Hauer } 10961ec1e82fSSascha Hauer 10978391ecf4SShengjiu Wang if (sdmac->event_id1) { 10988391ecf4SShengjiu Wang if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) 10998391ecf4SShengjiu Wang return -EINVAL; 11008391ecf4SShengjiu Wang 
sdma_event_enable(sdmac, sdmac->event_id1); 11018391ecf4SShengjiu Wang } 11028391ecf4SShengjiu Wang 11031ec1e82fSSascha Hauer switch (sdmac->peripheral_type) { 11041ec1e82fSSascha Hauer case IMX_DMATYPE_DSP: 11051ec1e82fSSascha Hauer sdma_config_ownership(sdmac, false, true, true); 11061ec1e82fSSascha Hauer break; 11071ec1e82fSSascha Hauer case IMX_DMATYPE_MEMORY: 11081ec1e82fSSascha Hauer sdma_config_ownership(sdmac, false, true, false); 11091ec1e82fSSascha Hauer break; 11101ec1e82fSSascha Hauer default: 11111ec1e82fSSascha Hauer sdma_config_ownership(sdmac, true, true, false); 11121ec1e82fSSascha Hauer break; 11131ec1e82fSSascha Hauer } 11141ec1e82fSSascha Hauer 11151ec1e82fSSascha Hauer sdma_get_pc(sdmac, sdmac->peripheral_type); 11161ec1e82fSSascha Hauer 11171ec1e82fSSascha Hauer if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) && 11181ec1e82fSSascha Hauer (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { 11191ec1e82fSSascha Hauer /* Handle multiple event channels differently */ 11201ec1e82fSSascha Hauer if (sdmac->event_id1) { 11218391ecf4SShengjiu Wang if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP || 11228391ecf4SShengjiu Wang sdmac->peripheral_type == IMX_DMATYPE_ASRC) 11238391ecf4SShengjiu Wang sdma_set_watermarklevel_for_p2p(sdmac); 11248391ecf4SShengjiu Wang } else 11250bbc1413SRichard Zhao __set_bit(sdmac->event_id0, sdmac->event_mask); 11268391ecf4SShengjiu Wang 11271ec1e82fSSascha Hauer /* Address */ 11281ec1e82fSSascha Hauer sdmac->shp_addr = sdmac->per_address; 11298391ecf4SShengjiu Wang sdmac->per_addr = sdmac->per_address2; 11301ec1e82fSSascha Hauer } else { 11311ec1e82fSSascha Hauer sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */ 11321ec1e82fSSascha Hauer } 11331ec1e82fSSascha Hauer 11341ec1e82fSSascha Hauer ret = sdma_load_context(sdmac); 11351ec1e82fSSascha Hauer 11361ec1e82fSSascha Hauer return ret; 11371ec1e82fSSascha Hauer } 11381ec1e82fSSascha Hauer 11391ec1e82fSSascha Hauer static int sdma_set_channel_priority(struct sdma_channel *sdmac, 11401ec1e82fSSascha Hauer unsigned int priority) 11411ec1e82fSSascha Hauer { 11421ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 11431ec1e82fSSascha Hauer int channel = sdmac->channel; 11441ec1e82fSSascha Hauer 11451ec1e82fSSascha Hauer if (priority < MXC_SDMA_MIN_PRIORITY 11461ec1e82fSSascha Hauer || priority > MXC_SDMA_MAX_PRIORITY) { 11471ec1e82fSSascha Hauer return -EINVAL; 11481ec1e82fSSascha Hauer } 11491ec1e82fSSascha Hauer 1150c4b56857SRichard Zhao writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); 11511ec1e82fSSascha Hauer 11521ec1e82fSSascha Hauer return 0; 11531ec1e82fSSascha Hauer } 11541ec1e82fSSascha Hauer 1155*57b772b8SRobin Gong static int sdma_request_channel0(struct sdma_engine *sdma) 11561ec1e82fSSascha Hauer { 11571ec1e82fSSascha Hauer int ret = -EBUSY; 11581ec1e82fSSascha Hauer 1159*57b772b8SRobin Gong sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, 1160*57b772b8SRobin Gong GFP_NOWAIT); 1161*57b772b8SRobin Gong if (!sdma->bd0) { 11621ec1e82fSSascha Hauer ret = -ENOMEM; 11631ec1e82fSSascha Hauer goto out; 11641ec1e82fSSascha Hauer } 11651ec1e82fSSascha Hauer 1166*57b772b8SRobin Gong sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys; 1167*57b772b8SRobin Gong sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys; 11681ec1e82fSSascha Hauer 1169*57b772b8SRobin Gong sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY); 11701ec1e82fSSascha Hauer return 0; 11711ec1e82fSSascha Hauer out: 11721ec1e82fSSascha Hauer 11731ec1e82fSSascha 
	return ret;
}


static int sdma_alloc_bd(struct sdma_desc *desc)
{
	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
	int ret = 0;

	desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
					GFP_ATOMIC);
	if (!desc->bd) {
		ret = -ENOMEM;
		goto out;
	}
out:
	return ret;
}

static void sdma_free_bd(struct sdma_desc *desc)
{
	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);

	dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
}

static void sdma_desc_free(struct virt_dma_desc *vd)
{
	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);

	sdma_free_bd(desc);
	kfree(desc);
}

static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = chan->private;
	int prio, ret;

	if (!data)
		return -EINVAL;

	switch (data->priority) {
	case DMA_PRIO_HIGH:
		prio = 3;
		break;
	case DMA_PRIO_MEDIUM:
		prio = 2;
		break;
	case DMA_PRIO_LOW:
	default:
		prio = 1;
		break;
	}

	sdmac->peripheral_type = data->peripheral_type;
	sdmac->event_id0 = data->dma_request;
	sdmac->event_id1 = data->dma_request2;

	ret = clk_enable(sdmac->sdma->clk_ipg);
	if (ret)
		return ret;
	ret = clk_enable(sdmac->sdma->clk_ahb);
	if (ret)
		goto disable_clk_ipg;

	ret = sdma_set_channel_priority(sdmac, prio);
	if (ret)
		goto disable_clk_ahb;

	return 0;

disable_clk_ahb:
	clk_disable(sdmac->sdma->clk_ahb);
disable_clk_ipg:
	clk_disable(sdmac->sdma->clk_ipg);
	return ret;
}

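/*
 * Minimal client-side sketch (illustrative only, not part of the driver):
 * how a non-DT user provides the imx_dma_data that sdma_alloc_chan_resources()
 * above expects via chan->private.  The event number and peripheral type are
 * placeholders, not taken from any real board file; the pattern mirrors what
 * sdma_xlate() further down in this file does for device tree users.
 *
 *	struct imx_dma_data data = {
 *		.dma_request	 = 1,			// example SDMA event
 *		.peripheral_type = IMX_DMATYPE_SSI,	// example peripheral
 *		.priority	 = DMA_PRIO_HIGH,	// maps to prio 3 above
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sdma_filter_fn, &data);
 *	if (!chan)
 *		return -ENODEV;
 *
 * The filter copies the data into the channel and exposes it through
 * chan->private, which is what the switch on data->priority above consumes.
 */
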
static void sdma_free_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;

	sdma_disable_channel_with_delay(chan);

	if (sdmac->event_id0)
		sdma_event_disable(sdmac, sdmac->event_id0);
	if (sdmac->event_id1)
		sdma_event_disable(sdmac, sdmac->event_id1);

	sdmac->event_id0 = 0;
	sdmac->event_id1 = 0;

	sdma_set_channel_priority(sdmac, 0);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);
}

static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int ret, i, count;
	int channel = sdmac->channel;
	struct scatterlist *sg;
	struct sdma_desc *desc;

	if (sdmac->status == DMA_IN_PROGRESS)
		return NULL;
	sdmac->status = DMA_IN_PROGRESS;

	sdmac->flags = 0;

	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
	if (!desc)
		goto err_out;

	desc->buf_tail = 0;
	desc->buf_ptail = 0;
	desc->sdmac = sdmac;
	desc->num_bd = sg_len;
	desc->chn_real_count = 0;

	if (sdma_alloc_bd(desc)) {
		kfree(desc);
		goto err_out;
	}

	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
			sg_len, channel);

	sdmac->direction = direction;
	ret = sdma_load_context(sdmac);
	if (ret)
		goto err_bd_out;

	if (sg_len > NUM_BD) {
		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
				channel, sg_len, NUM_BD);
		ret = -EINVAL;
		goto err_bd_out;
	}

	desc->chn_count = 0;
	for_each_sg(sgl, sg, sg_len, i) {
		struct sdma_buffer_descriptor *bd = &desc->bd[i];
		int param;

		bd->buffer_addr = sg->dma_address;

		count = sg_dma_len(sg);

		if (count > 0xffff) {
			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
					channel, count, 0xffff);
			ret = -EINVAL;
			goto err_bd_out;
		}

		bd->mode.count = count;
		desc->chn_count += count;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
			ret = -EINVAL;
			goto err_bd_out;
		}

		switch (sdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			bd->mode.command = 0;
			if (count & 3 || sg->dma_address & 3)
				goto err_bd_out;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			bd->mode.command = 2;
			if (count & 1 || sg->dma_address & 1)
				goto err_bd_out;
			break;
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			bd->mode.command = 1;
			break;
		default:
			goto err_bd_out;
		}

		param = BD_DONE | BD_EXTD | BD_CONT;

		if (i + 1 == sg_len) {
			param |= BD_INTR;
			param |= BD_LAST;
			param &= ~BD_CONT;
		}

		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
				i, count, (u64)sg->dma_address,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;
	}

	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
err_bd_out:
	sdma_free_bd(desc);
	kfree(desc);
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}

static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int num_periods = buf_len / period_len;
	int channel = sdmac->channel;
	int ret, i = 0, buf = 0;
	struct sdma_desc *desc;

	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);

	if (sdmac->status == DMA_IN_PROGRESS)
		return NULL;

	sdmac->status = DMA_IN_PROGRESS;

	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
	if (!desc)
		goto err_out;

	desc->buf_tail = 0;
	desc->buf_ptail = 0;
	desc->sdmac = sdmac;
	desc->num_bd = num_periods;
	desc->chn_real_count = 0;
	desc->period_len = period_len;

	sdmac->flags |= IMX_DMA_SG_LOOP;
	sdmac->direction = direction;

	if (sdma_alloc_bd(desc)) {
		kfree(desc);
		goto err_out;	/* desc is already freed; skip the BD teardown path */
	}

	ret = sdma_load_context(sdmac);
	if (ret)
		goto err_bd_out;

	if (num_periods > NUM_BD) {
		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
				channel, num_periods, NUM_BD);
		goto err_bd_out;
	}

	if (period_len > 0xffff) {
		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
				channel, period_len, 0xffff);
		goto err_bd_out;
	}

	while (buf < buf_len) {
		struct sdma_buffer_descriptor *bd = &desc->bd[i];
		int param;

		bd->buffer_addr = dma_addr;

		bd->mode.count = period_len;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
			goto err_bd_out;
		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
			bd->mode.command = 0;
		else
			bd->mode.command = sdmac->word_size;

		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
		if (i + 1 == num_periods)
			param |= BD_WRAP;

		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
				i, period_len, (u64)dma_addr,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;

		dma_addr += period_len;
		buf += period_len;

		i++;
	}

	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
err_bd_out:
	sdma_free_bd(desc);
	kfree(desc);
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}

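/*
 * Client-side sketch (illustrative, placeholder names): the usual dmaengine
 * sequence that ends up in sdma_prep_slave_sg() above.  A single mapped
 * buffer is wrapped by dmaengine_prep_slave_single(), which builds a
 * one-entry scatterlist internally; "dev"/"chan"/"rxbuf"/"len"/"priv" are
 * assumed to be client driver state.
 *
 *	dma_addr_t dma = dma_map_single(dev, rxbuf, len, DMA_FROM_DEVICE);
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 *	txd = dmaengine_prep_slave_single(chan, dma, len, DMA_DEV_TO_MEM,
 *					  DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -EINVAL;
 *	txd->callback = rx_dma_complete;	// client completion handler
 *	txd->callback_param = priv;
 *
 *	cookie = dmaengine_submit(txd);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);		// reaches sdma_issue_pending()
 *
 * Each scatterlist entry becomes one SDMA buffer descriptor, so sg_len is
 * bounded by NUM_BD and each entry by 0xffff bytes, as checked above.
 */
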
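/*
 * Client-side sketch (illustrative, placeholder names): cyclic transfers as
 * used by audio.  sdma_prep_dma_cyclic() above builds one buffer descriptor
 * per period; the last one gets BD_WRAP so the script loops until the channel
 * is terminated.
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, ring_dma, ring_bytes,
 *					period_bytes, DMA_MEM_TO_DEV,
 *					DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -EINVAL;
 *	txd->callback = period_elapsed;		// called once per period
 *	txd->callback_param = substream;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *	...
 *	dmaengine_terminate_all(chan);		// stop the ring when done
 *
 * ring_bytes / period_bytes must not exceed NUM_BD periods and each period
 * is limited to 0xffff bytes, per the checks above.
 */
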
static int sdma_config(struct dma_chan *chan,
		struct dma_slave_config *dmaengine_cfg)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
		sdmac->per_address = dmaengine_cfg->src_addr;
		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
			dmaengine_cfg->src_addr_width;
		sdmac->word_size = dmaengine_cfg->src_addr_width;
	} else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
		sdmac->per_address2 = dmaengine_cfg->src_addr;
		sdmac->per_address = dmaengine_cfg->dst_addr;
		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
			SDMA_WATERMARK_LEVEL_LWML;
		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
			SDMA_WATERMARK_LEVEL_HWML;
		sdmac->word_size = dmaengine_cfg->dst_addr_width;
	} else {
		sdmac->per_address = dmaengine_cfg->dst_addr;
		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
			dmaengine_cfg->dst_addr_width;
		sdmac->word_size = dmaengine_cfg->dst_addr_width;
	}
	sdmac->direction = dmaengine_cfg->direction;
	return sdma_config_channel(chan);
}

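/*
 * Client-side sketch (illustrative, placeholder values): the dma_slave_config
 * that feeds sdma_config() above.  For the ordinary MEM_TO_DEV case only the
 * destination side matters; the FIFO address and burst below stand in for
 * whatever the peripheral really uses.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,			// peripheral FIFO
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 6,				// watermark
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * For DMA_DEV_TO_DEV (ASRC p2p) both src_addr and dst_addr are given and the
 * two maxburst values are packed into the LWML/HWML fields handled by
 * sdma_set_watermarklevel_for_p2p() earlier in this file.
 */
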
static enum dma_status sdma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_desc *desc;
	u32 residue;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	vd = vchan_find_desc(&sdmac->vc, cookie);
	if (vd) {
		desc = to_sdma_desc(&vd->tx);
		if (sdmac->flags & IMX_DMA_SG_LOOP)
			residue = (desc->num_bd - desc->buf_ptail) *
				desc->period_len - desc->chn_real_count;
		else
			residue = desc->chn_count - desc->chn_real_count;
	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
		residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
	} else {
		residue = 0;
	}
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 residue);

	return sdmac->status;
}

static void sdma_issue_pending(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
		sdma_start_desc(sdmac);
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
}

#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	41
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	42

static void sdma_add_scripts(struct sdma_engine *sdma,
		const struct sdma_script_start_addrs *addr)
{
	s32 *addr_arr = (u32 *)addr;
	s32 *saddr_arr = (u32 *)sdma->script_addrs;
	int i;

	/* use the default firmware in ROM if missing external firmware */
	if (!sdma->script_number)
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;

	for (i = 0; i < sdma->script_number; i++)
		if (addr_arr[i] > 0)
			saddr_arr[i] = addr_arr[i];
}

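/*
 * Client-side sketch (illustrative): reading back the residue computed by
 * sdma_tx_status() above, e.g. to work out the current position inside a
 * cyclic audio ring.  "chan", "cookie" and "ring_bytes" are assumed client
 * state.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *	unsigned int pos;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_ERROR)
 *		pos = ring_bytes - state.residue;
 *
 * Residue is reported with DMA_RESIDUE_GRANULARITY_SEGMENT (set in
 * sdma_probe() below), i.e. it only advances per completed buffer
 * descriptor, not per byte.
 */
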
static void sdma_load_firmware(const struct firmware *fw, void *context)
{
	struct sdma_engine *sdma = context;
	const struct sdma_firmware_header *header;
	const struct sdma_script_start_addrs *addr;
	unsigned short *ram_code;

	if (!fw) {
		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
		/* In this case we just use the ROM firmware. */
		return;
	}

	if (fw->size < sizeof(*header))
		goto err_firmware;

	header = (struct sdma_firmware_header *)fw->data;

	if (header->magic != SDMA_FIRMWARE_MAGIC)
		goto err_firmware;
	if (header->ram_code_start + header->ram_code_size > fw->size)
		goto err_firmware;
	switch (header->version_major) {
	case 1:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
		break;
	case 2:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
		break;
	case 3:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
		break;
	case 4:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
		break;
	default:
		dev_err(sdma->dev, "unknown firmware version\n");
		goto err_firmware;
	}

	addr = (void *)header + header->script_addrs_start;
	ram_code = (void *)header + header->ram_code_start;

	clk_enable(sdma->clk_ipg);
	clk_enable(sdma->clk_ahb);
	/* download the RAM image for SDMA */
	sdma_load_script(sdma, ram_code,
			header->ram_code_size,
			addr->ram_code_start_addr);
	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	sdma_add_scripts(sdma, addr);

	dev_info(sdma->dev, "loaded firmware %d.%d\n",
			header->version_major,
			header->version_minor);

err_firmware:
	release_firmware(fw);
}

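/*
 * Layout of the external firmware image consumed by sdma_load_firmware()
 * above, reconstructed from the header fields it dereferences (offsets are
 * relative to the start of the blob; the exact header definition lives
 * earlier in this file):
 *
 *	+------------------------------+  0
 *	| struct sdma_firmware_header  |  magic, version, offsets/sizes
 *	+------------------------------+  header->script_addrs_start
 *	| script start address table   |  merged via sdma_add_scripts()
 *	+------------------------------+  header->ram_code_start
 *	| RAM script code              |  header->ram_code_size bytes,
 *	+------------------------------+  loaded with sdma_load_script()
 *
 * Only the fields actually used above are shown.
 */
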
#define EVENT_REMAP_CELLS	3

static int sdma_event_remap(struct sdma_engine *sdma)
{
	struct device_node *np = sdma->dev->of_node;
	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
	struct property *event_remap;
	struct regmap *gpr;
	char propname[] = "fsl,sdma-event-remap";
	u32 reg, val, shift, num_map, i;
	int ret = 0;

	if (IS_ERR(np) || IS_ERR(gpr_np))
		goto out;

	event_remap = of_find_property(np, propname, NULL);
	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
	if (!num_map) {
		dev_dbg(sdma->dev, "no event needs to be remapped\n");
		goto out;
	} else if (num_map % EVENT_REMAP_CELLS) {
		dev_err(sdma->dev, "the property %s must modulo %d\n",
			propname, EVENT_REMAP_CELLS);
		ret = -EINVAL;
		goto out;
	}

	gpr = syscon_node_to_regmap(gpr_np);
	if (IS_ERR(gpr)) {
		dev_err(sdma->dev, "failed to get gpr regmap\n");
		ret = PTR_ERR(gpr);
		goto out;
	}

	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
		ret = of_property_read_u32_index(np, propname, i, &reg);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
				propname, i);
			goto out;
		}

		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
				propname, i + 1);
			goto out;
		}

		ret = of_property_read_u32_index(np, propname, i + 2, &val);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
				propname, i + 2);
			goto out;
		}

		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
	}

out:
	if (!IS_ERR(gpr_np))
		of_node_put(gpr_np);

	return ret;
}

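/*
 * Device tree sketch (illustrative values, not taken from a real board):
 * the "fsl,sdma-event-remap" property parsed by sdma_event_remap() above is
 * a list of <gpr-register bit-shift value> triplets applied to the "gpr"
 * syscon, e.g. to reroute shared DMA events to the SDMA on some SoCs:
 *
 *	&sdma {
 *		gpr = <&gpr>;
 *		fsl,sdma-event-remap = <0 15 1>, <0 16 1>;
 *	};
 *
 * Each triplet becomes regmap_update_bits(gpr, reg, BIT(shift), val << shift)
 * in the loop above.
 */
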
static int sdma_get_firmware(struct sdma_engine *sdma,
		const char *fw_name)
{
	int ret;

	ret = request_firmware_nowait(THIS_MODULE,
			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
			GFP_KERNEL, sdma, sdma_load_firmware);

	return ret;
}

static int sdma_init(struct sdma_engine *sdma)
{
	int i, ret;
	dma_addr_t ccb_phys;

	ret = clk_enable(sdma->clk_ipg);
	if (ret)
		return ret;
	ret = clk_enable(sdma->clk_ahb);
	if (ret)
		goto disable_clk_ipg;

	/* Be sure SDMA has not started yet */
	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);

	sdma->channel_control = dma_alloc_coherent(NULL,
			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
			sizeof(struct sdma_context_data),
			&ccb_phys, GFP_KERNEL);

	if (!sdma->channel_control) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	sdma->context = (void *)sdma->channel_control +
		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
	sdma->context_phys = ccb_phys +
		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);

	/* Zero-out the CCB structures array just allocated */
	memset(sdma->channel_control, 0,
			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));

	/* disable all channels */
	for (i = 0; i < sdma->drvdata->num_events; i++)
		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));

	/* All channels have priority 0 */
	for (i = 0; i < MAX_DMA_CHANNELS; i++)
		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);

	ret = sdma_request_channel0(sdma);
	if (ret)
		goto err_dma_alloc;

	sdma_config_ownership(&sdma->channel[0], false, true, false);

	/* Set Command Channel (Channel Zero) */
	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);

	/* Set bits of CONFIG register but with static context switching */
	/* FIXME: Check whether to set ACR bit depending on clock ratios */
	writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);

	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);

	/* Initializes channel's priorities */
	sdma_set_channel_priority(&sdma->channel[0], 7);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	return 0;

err_dma_alloc:
	clk_disable(sdma->clk_ahb);
disable_clk_ipg:
	clk_disable(sdma->clk_ipg);
	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
	return ret;
}

static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = fn_param;

	if (!imx_dma_is_general_purpose(chan))
		return false;

	sdmac->data = *data;
	chan->private = &sdmac->data;

	return true;
}

static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
				   struct of_dma *ofdma)
{
	struct sdma_engine *sdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
	struct imx_dma_data data;

	if (dma_spec->args_count != 3)
		return NULL;

	data.dma_request = dma_spec->args[0];
	data.peripheral_type = dma_spec->args[1];
	data.priority = dma_spec->args[2];
	/*
	 * Initialize dma_request2 to zero; it is not used by the DT.
	 * For P2P, dma_request2 comes in through dma_request_channel():
	 * chan->private will point to the imx_dma_data, and in
	 * device_alloc_chan_resources() imx_dma_data.dma_request2 will
	 * be copied to sdmac->event_id1.
	 */
	data.dma_request2 = 0;

	return dma_request_channel(mask, sdma_filter_fn, &data);
}

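/*
 * Device tree sketch (illustrative values): what a client node provides for
 * the three-cell specifier decoded by sdma_xlate() above -- cell 0 is the
 * SDMA event/request number, cell 1 the peripheral type and cell 2 the
 * priority:
 *
 *	uart1: serial@... {
 *		...
 *		dmas = <&sdma 25 4 0>, <&sdma 26 4 0>;
 *		dma-names = "rx", "tx";
 *	};
 *
 * dma_request_slave_channel(dev, "rx") then resolves through sdma_xlate(),
 * which packs the cells into imx_dma_data and requests a channel with
 * sdma_filter_fn().
 */
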
static int sdma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(sdma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct device_node *spba_bus;
	const char *fw_name;
	int ret;
	int irq;
	struct resource *iores;
	struct resource spba_res;
	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int i;
	struct sdma_engine *sdma;
	s32 *saddr_arr;
	const struct sdma_driver_data *drvdata = NULL;

	if (of_id)
		drvdata = of_id->data;
	else if (pdev->id_entry)
		drvdata = (void *)pdev->id_entry->driver_data;

	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	spin_lock_init(&sdma->channel_0_lock);

	sdma->dev = &pdev->dev;
	sdma->drvdata = drvdata;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(sdma->regs))
		return PTR_ERR(sdma->regs);

	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(sdma->clk_ipg))
		return PTR_ERR(sdma->clk_ipg);

	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(sdma->clk_ahb))
		return PTR_ERR(sdma->clk_ahb);

	ret = clk_prepare(sdma->clk_ipg);
	if (ret)
		return ret;

	ret = clk_prepare(sdma->clk_ahb);
	if (ret)
		goto err_clk;

	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
			       sdma);
	if (ret)
		goto err_irq;

	sdma->irq = irq;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs) {
		ret = -ENOMEM;
		goto err_irq;
	}

	/* initially no scripts available */
	saddr_arr = (s32 *)sdma->script_addrs;
	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
		saddr_arr[i] = -EINVAL;

	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);

	INIT_LIST_HEAD(&sdma->dma_device.channels);
	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		sdmac->sdma = sdma;
		spin_lock_init(&sdmac->lock);

		sdmac->channel = i;
		sdmac->vc.desc_free = sdma_desc_free;
		/*
		 * Add the channel to the DMAC list. Do not add channel 0 though
		 * because we need it internally in the SDMA driver. This also means
		 * that channel 0 in dmaengine counting matches sdma channel 1.
		 */
		if (i)
			vchan_init(&sdmac->vc, &sdma->dma_device);
	}

	ret = sdma_init(sdma);
	if (ret)
		goto err_init;

	ret = sdma_event_remap(sdma);
	if (ret)
		goto err_init;

	if (sdma->drvdata->script_addrs)
		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
	if (pdata && pdata->script_addrs)
		sdma_add_scripts(sdma, pdata->script_addrs);

	if (pdata) {
		ret = sdma_get_firmware(sdma, pdata->fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
	} else {
		/*
		 * Because the device tree does not encode the ROM script
		 * address, the RAM script in the firmware is mandatory for
		 * a device tree probe; otherwise probing fails.
		 */
		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
					      &fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware name\n");
		else {
			ret = sdma_get_firmware(sdma, fw_name);
			if (ret)
				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
		}
	}

	sdma->dma_device.dev = &pdev->dev;

	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
	sdma->dma_device.device_tx_status = sdma_tx_status;
	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
	sdma->dma_device.device_config = sdma_config;
	sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	sdma->dma_device.device_issue_pending = sdma_issue_pending;
	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
	dma_set_max_seg_size(sdma->dma_device.dev, 65535);

	platform_set_drvdata(pdev, sdma);

	ret = dma_async_device_register(&sdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	if (np) {
		ret = of_dma_controller_register(np, sdma_xlate, sdma);
		if (ret) {
			dev_err(&pdev->dev, "failed to register controller\n");
			goto err_register;
		}

		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
		ret = of_address_to_resource(spba_bus, 0, &spba_res);
		if (!ret) {
			sdma->spba_start_addr = spba_res.start;
			sdma->spba_end_addr = spba_res.end;
		}
		of_node_put(spba_bus);
	}

	return 0;

err_register:
	dma_async_device_unregister(&sdma->dma_device);
err_init:
	kfree(sdma->script_addrs);
err_irq:
	clk_unprepare(sdma->clk_ahb);
err_clk:
	clk_unprepare(sdma->clk_ipg);
	return ret;
}

static int sdma_remove(struct platform_device *pdev)
{
	struct sdma_engine *sdma = platform_get_drvdata(pdev);
	int i;

	devm_free_irq(&pdev->dev, sdma->irq, sdma);
	dma_async_device_unregister(&sdma->dma_device);
	kfree(sdma->script_addrs);
	clk_unprepare(sdma->clk_ahb);
	clk_unprepare(sdma->clk_ipg);
	/* Kill the tasklet */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		tasklet_kill(&sdmac->vc.task);
		sdma_free_chan_resources(&sdmac->vc.chan);
	}

	platform_set_drvdata(pdev, NULL);
	return 0;
}

static struct platform_driver sdma_driver = {
	.driver		= {
		.name	= "imx-sdma",
		.of_match_table = sdma_dt_ids,
	},
	.id_table	= sdma_devtypes,
	.remove		= sdma_remove,
	.probe		= sdma_probe,
};

module_platform_driver(sdma_driver);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
#if IS_ENABLED(CONFIG_SOC_IMX6Q)
MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
#endif
#if IS_ENABLED(CONFIG_SOC_IMX7D)
MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
#endif
MODULE_LICENSE("GPL");
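
/*
 * Firmware naming sketch (illustrative): the RAM script requested through
 * "fsl,sdma-ram-script-name" in sdma_probe() is looked up by request_firmware
 * under /lib/firmware, matching the MODULE_FIRMWARE() hints above, e.g.:
 *
 *	&sdma {
 *		fsl,sdma-ram-script-name = "imx/sdma/sdma-imx6q.bin";
 *	};
 *
 * Without the file the driver falls back to the scripts in the on-chip ROM,
 * as logged by sdma_load_firmware().
 */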