// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-sdma.c
//
// This file contains a driver for the Freescale Smart DMA engine
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
//
// Based on code from Freescale:
//
// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.

#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/workqueue.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* SDMA registers */
#define SDMA_H_C0PTR		0x000
#define SDMA_H_INTR		0x004
#define SDMA_H_STATSTOP		0x008
#define SDMA_H_START		0x00c
#define SDMA_H_EVTOVR		0x010
#define SDMA_H_DSPOVR		0x014
#define SDMA_H_HOSTOVR		0x018
#define SDMA_H_EVTPEND		0x01c
#define SDMA_H_DSPENBL		0x020
#define SDMA_H_RESET		0x024
#define SDMA_H_EVTERR		0x028
#define SDMA_H_INTRMSK		0x02c
#define SDMA_H_PSW		0x030
#define SDMA_H_EVTERRDBG	0x034
#define SDMA_H_CONFIG		0x038
#define SDMA_ONCE_ENB		0x040
#define SDMA_ONCE_DATA		0x044
#define SDMA_ONCE_INSTR		0x048
#define SDMA_ONCE_STAT		0x04c
#define SDMA_ONCE_CMD		0x050
#define SDMA_EVT_MIRROR		0x054
#define SDMA_ILLINSTADDR	0x058
#define SDMA_CHN0ADDR		0x05c
#define SDMA_ONCE_RTB		0x060
#define SDMA_XTRIG_CONF1	0x070
#define SDMA_XTRIG_CONF2	0x074
#define SDMA_CHNENBL0_IMX35	0x200
#define SDMA_CHNENBL0_IMX31	0x080
#define SDMA_CHNPRI_0		0x100

/*
 * Buffer descriptor status values.
 */
#define BD_DONE  0x01
#define BD_WRAP  0x02
#define BD_CONT  0x04
#define BD_INTR  0x08
#define BD_RROR  0x10
#define BD_LAST  0x20
#define BD_EXTD  0x80
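/*
 * Note: BD_DONE acts as the ownership bit of a buffer descriptor - the host
 * sets it to hand the descriptor to the SDMA, and the SDMA clears it once
 * the transfer described by that descriptor has completed. In cyclic mode
 * sdma_update_channel_loop() below re-sets BD_DONE to recycle descriptors.
 */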
/*
 * Data Node descriptor status values.
 */
#define DND_END_OF_FRAME  0x80
#define DND_END_OF_XFER   0x40
#define DND_DONE          0x20
#define DND_UNUSED        0x01

/*
 * IPCV2 descriptor status values.
 */
#define BD_IPCV2_END_OF_FRAME  0x40

#define IPCV2_MAX_NODES        50
/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error
 */
#define DATA_ERROR  0x10000000

/*
 * Buffer descriptor commands.
 */
#define C0_ADDR             0x01
#define C0_LOAD             0x02
#define C0_DUMP             0x03
#define C0_SETCTX           0x07
#define C0_GETCTX           0x03
#define C0_SETDM            0x01
#define C0_SETPM            0x04
#define C0_GETDM            0x02
#define C0_GETPM            0x08
/*
 * Change endianness indicator in the BD command field
 */
#define CHANGE_ENDIANNESS   0x80

/*
 *  p_2_p watermark_level description
 *	Bits		Name			Description
 *	0-7		Lower WML		Lower watermark level
 *	8		PS			1: Pad Swallowing
 *						0: No Pad Swallowing
 *	9		PA			1: Pad Adding
 *						0: No Pad Adding
 *	10		SPDIF			If this bit is set both source
 *						and destination are on SPBA
 *	11		Source Bit(SP)		1: Source on SPBA
 *						0: Source on AIPS
 *	12		Destination Bit(DP)	1: Destination on SPBA
 *						0: Destination on AIPS
 *	13-15		---------		MUST BE 0
 *	16-23		Higher WML		HWML
 *	24-27		N			Total number of samples after
 *						which Pad adding/Swallowing
 *						must be done. It must be odd.
 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 *						LWML event mask
 *						0: LWE in EVENTS register
 *						1: LWE in EVENTS2 register
 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 *						HWML event mask
 *						0: HWE in EVENTS register
 *						1: HWE in EVENTS2 register
 *	30		---------		MUST BE 0
 *	31		CONT			1: Amount of samples to be
 *						transferred is unknown and
 *						script will keep on
 *						transferring samples as long as
 *						both events are detected and
 *						script must be manually stopped
 *						by the application
 *						0: The amount of samples to be
 *						transferred is equal to the
 *						count field of mode word
 */
#define SDMA_WATERMARK_LEVEL_LWML	0xFF
#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
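/*
 * For example (illustrative values only): a peripheral-to-peripheral
 * transfer with a lower watermark of 4, a higher watermark of 8, both
 * peripherals on SPBA and continuous transfer enabled ends up with a
 * watermark_level of roughly
 *	(8 << 16) | 4 | SDMA_WATERMARK_LEVEL_SP | SDMA_WATERMARK_LEVEL_DP |
 *	SDMA_WATERMARK_LEVEL_CONT
 * which is the value sdma_set_watermarklevel_for_p2p() below hands to the
 * script in gReg[7].
 */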
#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
				 BIT(DMA_MEM_TO_DEV) | \
				 BIT(DMA_DEV_TO_DEV))

/*
 * Mode/Count of data node descriptors - IPCv2
 */
struct sdma_mode_count {
#define SDMA_BD_MAX_CNT	0xffff
	u32 count   : 16; /* size of the buffer pointed by this BD */
	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
	u32 command :  8; /* command mostly used for channel 0 */
};

/*
 * Buffer descriptor
 */
struct sdma_buffer_descriptor {
	struct sdma_mode_count  mode;
	u32 buffer_addr;	/* address of the buffer described */
	u32 ext_buffer_addr;	/* extended buffer address */
} __attribute__ ((packed));

/**
 * struct sdma_channel_control - Channel control Block
 *
 * @current_bd_ptr:	current buffer descriptor processed
 * @base_bd_ptr:	first element of buffer descriptor array
 * @unused:		padding. The SDMA engine expects an array of 128 byte
 *			control blocks
 */
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));

/**
 * struct sdma_state_registers - SDMA context for a channel
 *
 * @pc:		program counter
 * @unused1:	unused
 * @t:		test bit: status of arithmetic & test instruction
 * @rpc:	return program counter
 * @unused0:	unused
 * @sf:		source fault while loading data
 * @spc:	loop start program counter
 * @unused2:	unused
 * @df:		destination fault while storing data
 * @epc:	loop end program counter
 * @lm:		loop mode
 */
struct sdma_state_registers {
	u32 pc     :14;
	u32 unused1: 1;
	u32 t      : 1;
	u32 rpc    :14;
	u32 unused0: 1;
	u32 sf     : 1;
	u32 spc    :14;
	u32 unused2: 1;
	u32 df     : 1;
	u32 epc    :14;
	u32 lm     : 2;
} __attribute__ ((packed));

/**
 * struct sdma_context_data - sdma context specific to a channel
 *
 * @channel_state:	channel state bits
 * @gReg:		general registers
 * @mda:		burst dma destination address register
 * @msa:		burst dma source address register
 * @ms:			burst dma status register
 * @md:			burst dma data register
 * @pda:		peripheral dma destination address register
 * @psa:		peripheral dma source address register
 * @ps:			peripheral dma status register
 * @pd:			peripheral dma data register
 * @ca:			CRC polynomial register
 * @cs:			CRC accumulator register
 * @dda:		dedicated core destination address register
 * @dsa:		dedicated core source address register
 * @ds:			dedicated core status register
 * @dd:			dedicated core data register
 * @scratch0:		1st word of dedicated ram for context switch
 * @scratch1:		2nd word of dedicated ram for context switch
 * @scratch2:		3rd word of dedicated ram for context switch
 * @scratch3:		4th word of dedicated ram for context switch
 * @scratch4:		5th word of dedicated ram for context switch
 * @scratch5:		6th word of dedicated ram for context switch
 * @scratch6:		7th word of dedicated ram for context switch
 * @scratch7:		8th word of dedicated ram for context switch
 */
struct sdma_context_data {
	struct sdma_state_registers  channel_state;
	u32  gReg[8];
	u32  mda;
	u32  msa;
	u32  ms;
	u32  md;
	u32  pda;
	u32  psa;
	u32  ps;
	u32  pd;
	u32  ca;
	u32  cs;
	u32  dda;
	u32  dsa;
	u32  ds;
	u32  dd;
	u32  scratch0;
	u32  scratch1;
	u32  scratch2;
	u32  scratch3;
	u32  scratch4;
	u32  scratch5;
	u32  scratch6;
	u32  scratch7;
} __attribute__ ((packed));
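/*
 * Note: struct sdma_context_data is 128 bytes (32 words) - 8 bytes of state
 * registers, 8 general registers, 14 functional-unit registers and 8 scratch
 * words. sdma_load_context() below relies on this size when it points
 * channel 0 at the per-channel context area at SDMA address
 * 2048 + (sizeof(*context) / 4) * channel, i.e. 2048 + 32 * channel.
 */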
struct sdma_engine;

/**
 * struct sdma_desc - descriptor structure for one transfer
 * @vd:			descriptor for virt dma
 * @num_bd:		number of descriptors currently handling
 * @bd_phys:		physical address of bd
 * @buf_tail:		ID of the buffer that was processed
 * @buf_ptail:		ID of the previous buffer that was processed
 * @period_len:		period length, used in cyclic.
 * @chn_real_count:	the real count updated from bd->mode.count
 * @chn_count:		the transfer count set
 * @sdmac:		sdma_channel pointer
 * @bd:			pointer of allocated bd
 */
struct sdma_desc {
	struct virt_dma_desc	vd;
	unsigned int		num_bd;
	dma_addr_t		bd_phys;
	unsigned int		buf_tail;
	unsigned int		buf_ptail;
	unsigned int		period_len;
	unsigned int		chn_real_count;
	unsigned int		chn_count;
	struct sdma_channel	*sdmac;
	struct sdma_buffer_descriptor *bd;
};

/**
 * struct sdma_channel - housekeeping for a SDMA channel
 *
 * @vc:			virt_dma base structure
 * @desc:		sdma description including vd and other special member
 * @sdma:		pointer to the SDMA engine for this channel
 * @channel:		the channel number, matches dmaengine chan_id + 1
 * @direction:		transfer type. Needed for setting SDMA script
 * @slave_config:	Slave configuration
 * @peripheral_type:	Peripheral type. Needed for setting SDMA script
 * @event_id0:		aka dma request line
 * @event_id1:		for channels that use 2 events
 * @word_size:		peripheral access size
 * @pc_from_device:	script address for those device_2_memory
 * @pc_to_device:	script address for those memory_2_device
 * @device_to_device:	script address for those device_2_device
 * @pc_to_pc:		script address for those memory_2_memory
 * @flags:		loop mode or not
 * @per_address:	peripheral source or destination address in common case
 *			destination address in p_2_p case
 * @per_address2:	peripheral source address in p_2_p case
 * @event_mask:		event mask used in p_2_p script
 * @watermark_level:	value for gReg[7], some script will extend it from
 *			basic watermark such as p_2_p
 * @shp_addr:		value for gReg[6]
 * @per_addr:		value for gReg[2]
 * @status:		status of dma channel
 * @data:		specific sdma interface structure
 * @bd_pool:		dma_pool for bd
 */
struct sdma_channel {
	struct virt_dma_chan		vc;
	struct sdma_desc		*desc;
	struct sdma_engine		*sdma;
	unsigned int			channel;
	enum dma_transfer_direction	direction;
	struct dma_slave_config		slave_config;
	enum sdma_peripheral_type	peripheral_type;
	unsigned int			event_id0;
	unsigned int			event_id1;
	enum dma_slave_buswidth		word_size;
	unsigned int			pc_from_device, pc_to_device;
	unsigned int			device_to_device;
	unsigned int			pc_to_pc;
	unsigned long			flags;
	dma_addr_t			per_address, per_address2;
	unsigned long			event_mask[2];
	unsigned long			watermark_level;
	u32				shp_addr, per_addr;
	enum dma_status			status;
	bool				context_loaded;
	struct imx_dma_data		data;
	struct work_struct		terminate_worker;
};

#define IMX_DMA_SG_LOOP		BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

#define SDMA_FIRMWARE_MAGIC 0x414d4453

/**
 * struct sdma_firmware_header - Layout of the firmware image
 *
 * @magic:		"SDMA"
 * @version_major:	increased whenever layout of struct
 *			sdma_script_start_addrs changes.
 * @version_minor:	firmware minor version (for binary compatible changes)
 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs:	Number of script addresses in this image
 * @ram_code_start:	offset of SDMA ram image in this firmware image
 * @ram_code_size:	size of SDMA ram image
 * @script_addrs:	Stores the start address of the SDMA scripts
 *			(in SDMA memory space)
 */
struct sdma_firmware_header {
	u32	magic;
	u32	version_major;
	u32	version_minor;
	u32	script_addrs_start;
	u32	num_script_addrs;
	u32	ram_code_start;
	u32	ram_code_size;
};

struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs	*script_addrs;
};

struct sdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct sdma_channel		channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control	*channel_control;
	void __iomem			*regs;
	struct sdma_context_data	*context;
	dma_addr_t			context_phys;
	struct dma_device		dma_device;
	struct clk			*clk_ipg;
	struct clk			*clk_ahb;
	spinlock_t			channel_0_lock;
	u32				script_number;
	struct sdma_script_start_addrs	*script_addrs;
	const struct sdma_driver_data	*drvdata;
	u32				spba_start_addr;
	u32				spba_end_addr;
	unsigned int			irq;
	dma_addr_t			bd0_phys;
	struct sdma_buffer_descriptor	*bd0;
};

static int sdma_config_write(struct dma_chan *chan,
		       struct dma_slave_config *dmaengine_cfg,
		       enum dma_transfer_direction direction);

static struct sdma_driver_data sdma_imx31 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX31,
	.num_events = 32,
};

static struct sdma_script_start_addrs sdma_script_imx25 = {
	.ap_2_ap_addr = 729,
	.uart_2_mcu_addr = 904,
	.per_2_app_addr = 1255,
	.mcu_2_app_addr = 834,
	.uartsh_2_mcu_addr = 1120,
	.per_2_shp_addr = 1329,
	.mcu_2_shp_addr = 1048,
	.ata_2_mcu_addr = 1560,
	.mcu_2_ata_addr = 1479,
	.app_2_per_addr = 1189,
	.app_2_mcu_addr = 770,
	.shp_2_per_addr = 1407,
	.shp_2_mcu_addr = 979,
};

static struct sdma_driver_data sdma_imx25 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx25,
};

static struct sdma_driver_data sdma_imx35 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
};

static struct sdma_script_start_addrs sdma_script_imx51 = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_driver_data sdma_imx51 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx51,
};

static struct sdma_script_start_addrs sdma_script_imx53 = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_driver_data sdma_imx53 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx53,
};

static struct sdma_script_start_addrs sdma_script_imx6q = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.per_2_per_addr = 6331,
	.uartsh_2_mcu_addr = 1032,
	.mcu_2_shp_addr = 960,
	.app_2_mcu_addr = 683,
	.shp_2_mcu_addr = 891,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
};

static struct sdma_driver_data sdma_imx6q = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx6q,
};

static struct sdma_script_start_addrs sdma_script_imx7d = {
	.ap_2_ap_addr = 644,
	.uart_2_mcu_addr = 819,
	.mcu_2_app_addr = 749,
	.uartsh_2_mcu_addr = 1034,
	.mcu_2_shp_addr = 962,
	.app_2_mcu_addr = 685,
	.shp_2_mcu_addr = 893,
	.spdif_2_mcu_addr = 1102,
	.mcu_2_spdif_addr = 1136,
};

static struct sdma_driver_data sdma_imx7d = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
};

static const struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx25-sdma",
		.driver_data = (unsigned long)&sdma_imx25,
	}, {
		.name = "imx31-sdma",
		.driver_data = (unsigned long)&sdma_imx31,
	}, {
		.name = "imx35-sdma",
		.driver_data = (unsigned long)&sdma_imx35,
	}, {
		.name = "imx51-sdma",
		.driver_data = (unsigned long)&sdma_imx51,
	}, {
		.name = "imx53-sdma",
		.driver_data = (unsigned long)&sdma_imx53,
	}, {
		.name = "imx6q-sdma",
		.driver_data = (unsigned long)&sdma_imx6q,
	}, {
		.name = "imx7d-sdma",
		.driver_data = (unsigned long)&sdma_imx7d,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);

static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);

#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM	(3)	/* indicates which context switch mode is selected*/

static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
	u32 chnenbl0 = sdma->drvdata->chnenbl0;
	return chnenbl0 + event * 4;
}
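/*
 * For example, on an i.MX35-class SDMA (chnenbl0 = 0x200) DMA event 10 is
 * routed through the channel-enable register at 0x200 + 10 * 4 = 0x228,
 * with one bit per channel inside that register; see sdma_event_enable()
 * and sdma_event_disable() below.
 */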
static int sdma_config_ownership(struct sdma_channel *sdmac,
		bool event_override, bool mcu_override, bool dsp_override)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long evt, mcu, dsp;

	if (event_override && mcu_override && dsp_override)
		return -EINVAL;

	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);

	if (dsp_override)
		__clear_bit(channel, &dsp);
	else
		__set_bit(channel, &dsp);

	if (event_override)
		__clear_bit(channel, &evt);
	else
		__set_bit(channel, &evt);

	if (mcu_override)
		__clear_bit(channel, &mcu);
	else
		__set_bit(channel, &mcu);

	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);

	return 0;
}

static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
	writel(BIT(channel), sdma->regs + SDMA_H_START);
}

/*
 * sdma_run_channel0 - run a channel and wait till it's done
 */
static int sdma_run_channel0(struct sdma_engine *sdma)
{
	int ret;
	u32 reg;

	sdma_enable_channel(sdma, 0);

	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
						reg, !(reg & 1), 1, 500);
	if (ret)
		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");

	/* Set bits of CONFIG register with dynamic context switching */
	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	return ret;
}
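/*
 * Note: bit 0 of SDMA_H_STATSTOP mirrors the enable/running state of
 * channel 0, and the SDMA clears it once the channel 0 script has consumed
 * the command descriptor, so the poll in sdma_run_channel0() effectively
 * waits (up to 500 us) for completion of the command issued through bd0.
 */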
static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
		u32 address)
{
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	void *buf_virt;
	dma_addr_t buf_phys;
	int ret;
	unsigned long flags;

	buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL);
	if (!buf_virt) {
		return -ENOMEM;
	}

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = size / 2;
	bd0->buffer_addr = buf_phys;
	bd0->ext_buffer_addr = address;

	memcpy(buf_virt, buf, size);

	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	dma_free_coherent(NULL, size, buf_virt, buf_phys);

	return ret;
}
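/*
 * Note: the C0_SETPM count is size / 2 because SDMA program memory is
 * written in 16-bit units (the script instructions are 16 bits wide);
 * "address" is the destination address in SDMA program space, and the
 * firmware blob is first copied to DMA-able memory so the SDMA can fetch it.
 */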
static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long val;
	u32 chnenbl = chnenbl_ofs(sdma, event);

	val = readl_relaxed(sdma->regs + chnenbl);
	__set_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	u32 chnenbl = chnenbl_ofs(sdma, event);
	unsigned long val;

	val = readl_relaxed(sdma->regs + chnenbl);
	__clear_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct sdma_desc, vd.tx);
}

static void sdma_start_desc(struct sdma_channel *sdmac)
{
	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
	struct sdma_desc *desc;
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (!vd) {
		sdmac->desc = NULL;
		return;
	}
	sdmac->desc = desc = to_sdma_desc(&vd->tx);
	/*
	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
	 * the desc allocated will never be freed in vchan_dma_desc_free_list
	 */
	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
		list_del(&vd->node);

	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
	sdma_enable_channel(sdma, sdmac->channel);
}

static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int error = 0;
	enum dma_status	old_status = sdmac->status;

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (sdmac->desc) {
		struct sdma_desc *desc = sdmac->desc;

		bd = &desc->bd[desc->buf_tail];

		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR) {
			bd->mode.status &= ~BD_RROR;
			sdmac->status = DMA_ERROR;
			error = -EIO;
		}

		/*
		 * We use bd->mode.count to calculate the residue, since it
		 * contains the number of bytes present in the current buffer
		 * descriptor.
		 */

		desc->chn_real_count = bd->mode.count;
		bd->mode.status |= BD_DONE;
		bd->mode.count = desc->period_len;
		desc->buf_ptail = desc->buf_tail;
		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;

		/*
		 * The callback is called from the interrupt context in order
		 * to reduce latency and to avoid the risk of altering the
		 * SDMA transaction status by the time the client tasklet is
		 * executed.
		 */
		spin_unlock(&sdmac->vc.lock);
		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
		spin_lock(&sdmac->vc.lock);

		if (error)
			sdmac->status = old_status;
	}
}

static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
{
	struct sdma_channel *sdmac = (struct sdma_channel *) data;
	struct sdma_buffer_descriptor *bd;
	int i, error = 0;

	sdmac->desc->chn_real_count = 0;
	/*
	 * non loop mode. Iterate over all descriptors, collect
	 * errors and call callback function
	 */
	for (i = 0; i < sdmac->desc->num_bd; i++) {
		bd = &sdmac->desc->bd[i];

		if (bd->mode.status & (BD_DONE | BD_RROR))
			error = -EIO;
		sdmac->desc->chn_real_count += bd->mode.count;
	}

	if (error)
		sdmac->status = DMA_ERROR;
	else
		sdmac->status = DMA_COMPLETE;
}

static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	unsigned long stat;

	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
	/* channel 0 is special and not handled here, see run_channel0() */
	stat &= ~1;

	while (stat) {
		int channel = fls(stat) - 1;
		struct sdma_channel *sdmac = &sdma->channel[channel];
		struct sdma_desc *desc;

		spin_lock(&sdmac->vc.lock);
		desc = sdmac->desc;
		if (desc) {
			if (sdmac->flags & IMX_DMA_SG_LOOP) {
				sdma_update_channel_loop(sdmac);
			} else {
				mxc_sdma_handle_channel_normal(sdmac);
				vchan_cookie_complete(&desc->vd);
				sdma_start_desc(sdmac);
			}
		}

		spin_unlock(&sdmac->vc.lock);
		__clear_bit(channel, &stat);
	}

	return IRQ_HANDLED;
}

/*
 * sets the pc of SDMA script according to the peripheral type
 */
static void sdma_get_pc(struct sdma_channel *sdmac,
		enum sdma_peripheral_type peripheral_type)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int per_2_emi = 0, emi_2_per = 0;
	/*
	 * These are needed once we start to support transfers between
	 * two peripherals or memory-to-memory transfers
	 */
	int per_2_per = 0, emi_2_emi = 0;

	sdmac->pc_from_device = 0;
	sdmac->pc_to_device = 0;
	sdmac->device_to_device = 0;
	sdmac->pc_to_pc = 0;

	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:
		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
		break;
	case IMX_DMATYPE_DSP:
		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
		break;
	case IMX_DMATYPE_FIRI:
		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
		break;
	case IMX_DMATYPE_UART:
		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_UART_SP:
		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ATA:
		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
	case IMX_DMATYPE_SAI:
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_SSI_DUAL:
		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
	case IMX_DMATYPE_SDHC:
	case IMX_DMATYPE_CSPI_SP:
	case IMX_DMATYPE_ESAI:
	case IMX_DMATYPE_MSHC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ASRC:
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_ASRC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_MSHC:
		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
		break;
	case IMX_DMATYPE_CCM:
		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
		break;
	case IMX_DMATYPE_SPDIF:
		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
		break;
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
	default:
		break;
	}

	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
	sdmac->device_to_device = per_2_per;
	sdmac->pc_to_pc = emi_2_emi;
}

static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	int ret;
	unsigned long flags;

	if (sdmac->context_loaded)
		return 0;

	if (sdmac->direction == DMA_DEV_TO_MEM)
		load_address = sdmac->pc_from_device;
	else if (sdmac->direction == DMA_DEV_TO_DEV)
		load_address = sdmac->device_to_device;
	else if (sdmac->direction == DMA_MEM_TO_MEM)
		load_address = sdmac->pc_to_pc;
	else
		load_address = sdmac->pc_to_device;

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/* Send by context the event mask, base address for peripheral
	 * and watermark level
	 */
	context->gReg[0] = sdmac->event_mask[1];
	context->gReg[1] = sdmac->event_mask[0];
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	sdmac->context_loaded = true;

	return ret;
}

static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sdma_channel, vc.chan);
}

static int sdma_disable_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
	sdmac->status = DMA_ERROR;

	return 0;
}

static void sdma_channel_terminate_work(struct work_struct *work)
{
	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
						  terminate_worker);
	unsigned long flags;
	LIST_HEAD(head);

	/*
	 * According to NXP R&D team a delay of one BD SDMA cost time
	 * (maximum is 1ms) should be added after disable of the channel
	 * bit, to ensure SDMA core has really been stopped after SDMA
	 * clients call .device_terminate_all.
	 */
	usleep_range(1000, 2000);

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	vchan_get_all_descriptors(&sdmac->vc, &head);
	sdmac->desc = NULL;
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
	vchan_dma_desc_free_list(&sdmac->vc, &head);
	sdmac->context_loaded = false;
}

static int sdma_disable_channel_async(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	sdma_disable_channel(chan);

	if (sdmac->desc)
		schedule_work(&sdmac->terminate_worker);

	return 0;
}

static void sdma_channel_synchronize(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	vchan_synchronize(&sdmac->vc);

	flush_work(&sdmac->terminate_worker);
}

static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;

	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;

	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);

	if (sdmac->event_id0 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;

	if (sdmac->event_id1 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;

	/*
	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to swap
	 * LWML and HWML of INFO(A.3.2.5.1), and also swap
	 * r0(event_mask[1]) and r1(event_mask[0]).
	 */
	if (lwml > hwml) {
		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
						SDMA_WATERMARK_LEVEL_HWML);
		sdmac->watermark_level |= hwml;
		sdmac->watermark_level |= lwml << 16;
		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
	}

	if (sdmac->per_address2 >= sdma->spba_start_addr &&
			sdmac->per_address2 <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;

	if (sdmac->per_address >= sdma->spba_start_addr &&
			sdmac->per_address <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;

	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
}
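/*
 * For example (illustrative values only): with src_maxburst = 8 and
 * dst_maxburst = 4 the incoming watermark_level carries LWML = 8 and
 * HWML = 4; since lwml > hwml, sdma_set_watermarklevel_for_p2p() rewrites
 * it to LWML = 4 / HWML = 8 and swaps event_mask[0]/event_mask[1] so each
 * watermark stays paired with its DMA event.
 */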
IMX_DMATYPE_ASRC) 11568391ecf4SShengjiu Wang sdma_set_watermarklevel_for_p2p(sdmac); 11578391ecf4SShengjiu Wang } else 11580bbc1413SRichard Zhao __set_bit(sdmac->event_id0, sdmac->event_mask); 11598391ecf4SShengjiu Wang 11601ec1e82fSSascha Hauer /* Address */ 11611ec1e82fSSascha Hauer sdmac->shp_addr = sdmac->per_address; 11628391ecf4SShengjiu Wang sdmac->per_addr = sdmac->per_address2; 11631ec1e82fSSascha Hauer } else { 11641ec1e82fSSascha Hauer sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */ 11651ec1e82fSSascha Hauer } 11661ec1e82fSSascha Hauer 11671ec1e82fSSascha Hauer ret = sdma_load_context(sdmac); 11681ec1e82fSSascha Hauer 11691ec1e82fSSascha Hauer return ret; 11701ec1e82fSSascha Hauer } 11711ec1e82fSSascha Hauer 11721ec1e82fSSascha Hauer static int sdma_set_channel_priority(struct sdma_channel *sdmac, 11731ec1e82fSSascha Hauer unsigned int priority) 11741ec1e82fSSascha Hauer { 11751ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 11761ec1e82fSSascha Hauer int channel = sdmac->channel; 11771ec1e82fSSascha Hauer 11781ec1e82fSSascha Hauer if (priority < MXC_SDMA_MIN_PRIORITY 11791ec1e82fSSascha Hauer || priority > MXC_SDMA_MAX_PRIORITY) { 11801ec1e82fSSascha Hauer return -EINVAL; 11811ec1e82fSSascha Hauer } 11821ec1e82fSSascha Hauer 1183c4b56857SRichard Zhao writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); 11841ec1e82fSSascha Hauer 11851ec1e82fSSascha Hauer return 0; 11861ec1e82fSSascha Hauer } 11871ec1e82fSSascha Hauer 118857b772b8SRobin Gong static int sdma_request_channel0(struct sdma_engine *sdma) 11891ec1e82fSSascha Hauer { 11901ec1e82fSSascha Hauer int ret = -EBUSY; 11911ec1e82fSSascha Hauer 119257b772b8SRobin Gong sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, 119357b772b8SRobin Gong GFP_NOWAIT); 119457b772b8SRobin Gong if (!sdma->bd0) { 11951ec1e82fSSascha Hauer ret = -ENOMEM; 11961ec1e82fSSascha Hauer goto out; 11971ec1e82fSSascha Hauer } 11981ec1e82fSSascha Hauer 119957b772b8SRobin Gong sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys; 120057b772b8SRobin Gong sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys; 12011ec1e82fSSascha Hauer 120257b772b8SRobin Gong sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY); 12031ec1e82fSSascha Hauer return 0; 12041ec1e82fSSascha Hauer out: 12051ec1e82fSSascha Hauer 12061ec1e82fSSascha Hauer return ret; 12071ec1e82fSSascha Hauer } 12081ec1e82fSSascha Hauer 120957b772b8SRobin Gong 121057b772b8SRobin Gong static int sdma_alloc_bd(struct sdma_desc *desc) 12111ec1e82fSSascha Hauer { 1212ebb853b1SLucas Stach u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); 121357b772b8SRobin Gong int ret = 0; 12141ec1e82fSSascha Hauer 1215ebb853b1SLucas Stach desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys, 121664068853SLucas Stach GFP_NOWAIT); 121757b772b8SRobin Gong if (!desc->bd) { 121857b772b8SRobin Gong ret = -ENOMEM; 121957b772b8SRobin Gong goto out; 122057b772b8SRobin Gong } 122157b772b8SRobin Gong out: 122257b772b8SRobin Gong return ret; 122357b772b8SRobin Gong } 12241ec1e82fSSascha Hauer 122557b772b8SRobin Gong static void sdma_free_bd(struct sdma_desc *desc) 122657b772b8SRobin Gong { 1227ebb853b1SLucas Stach u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); 1228ebb853b1SLucas Stach 1229ebb853b1SLucas Stach dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys); 123057b772b8SRobin Gong } 12311ec1e82fSSascha Hauer 123257b772b8SRobin Gong static void sdma_desc_free(struct virt_dma_desc *vd) 123357b772b8SRobin Gong 
{ 123457b772b8SRobin Gong struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd); 123557b772b8SRobin Gong 123657b772b8SRobin Gong sdma_free_bd(desc); 123757b772b8SRobin Gong kfree(desc); 12381ec1e82fSSascha Hauer } 12391ec1e82fSSascha Hauer 12401ec1e82fSSascha Hauer static int sdma_alloc_chan_resources(struct dma_chan *chan) 12411ec1e82fSSascha Hauer { 12421ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 12431ec1e82fSSascha Hauer struct imx_dma_data *data = chan->private; 12440f06c027SRobin Gong struct imx_dma_data mem_data; 12451ec1e82fSSascha Hauer int prio, ret; 12461ec1e82fSSascha Hauer 12470f06c027SRobin Gong /* 12480f06c027SRobin Gong * MEMCPY may never set up chan->private via a filter function such as 12490f06c027SRobin Gong * dmatest, thus create 'struct imx_dma_data mem_data' for this case. 12500f06c027SRobin Gong * Please note in any other slave case, you have to set up chan->private 12510f06c027SRobin Gong * with 'struct imx_dma_data' in your own filter function if you want to 12520f06c027SRobin Gong * request a dma channel by dma_request_channel() rather than 12530f06c027SRobin Gong * dma_request_slave_channel(). Otherwise, 'MEMCPY in case?' will appear 12540f06c027SRobin Gong * to warn you to correct your filter function. 12550f06c027SRobin Gong */ 12560f06c027SRobin Gong if (!data) { 12570f06c027SRobin Gong dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n"); 12580f06c027SRobin Gong mem_data.priority = 2; 12590f06c027SRobin Gong mem_data.peripheral_type = IMX_DMATYPE_MEMORY; 12600f06c027SRobin Gong mem_data.dma_request = 0; 12610f06c027SRobin Gong mem_data.dma_request2 = 0; 12620f06c027SRobin Gong data = &mem_data; 12630f06c027SRobin Gong 12640f06c027SRobin Gong sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY); 12650f06c027SRobin Gong } 12661ec1e82fSSascha Hauer 12671ec1e82fSSascha Hauer switch (data->priority) { 12681ec1e82fSSascha Hauer case DMA_PRIO_HIGH: 12691ec1e82fSSascha Hauer prio = 3; 12701ec1e82fSSascha Hauer break; 12711ec1e82fSSascha Hauer case DMA_PRIO_MEDIUM: 12721ec1e82fSSascha Hauer prio = 2; 12731ec1e82fSSascha Hauer break; 12741ec1e82fSSascha Hauer case DMA_PRIO_LOW: 12751ec1e82fSSascha Hauer default: 12761ec1e82fSSascha Hauer prio = 1; 12771ec1e82fSSascha Hauer break; 12781ec1e82fSSascha Hauer } 12791ec1e82fSSascha Hauer 12801ec1e82fSSascha Hauer sdmac->peripheral_type = data->peripheral_type; 12811ec1e82fSSascha Hauer sdmac->event_id0 = data->dma_request; 12828391ecf4SShengjiu Wang sdmac->event_id1 = data->dma_request2; 1283c2c744d3SRichard Zhao 1284b93edcddSFabio Estevam ret = clk_enable(sdmac->sdma->clk_ipg); 1285b93edcddSFabio Estevam if (ret) 1286b93edcddSFabio Estevam return ret; 1287b93edcddSFabio Estevam ret = clk_enable(sdmac->sdma->clk_ahb); 1288b93edcddSFabio Estevam if (ret) 1289b93edcddSFabio Estevam goto disable_clk_ipg; 1290c2c744d3SRichard Zhao 12913bb5e7caSRichard Zhao ret = sdma_set_channel_priority(sdmac, prio); 12921ec1e82fSSascha Hauer if (ret) 1293b93edcddSFabio Estevam goto disable_clk_ahb; 12941ec1e82fSSascha Hauer 12951ec1e82fSSascha Hauer return 0; 1296b93edcddSFabio Estevam 1297b93edcddSFabio Estevam disable_clk_ahb: 1298b93edcddSFabio Estevam clk_disable(sdmac->sdma->clk_ahb); 1299b93edcddSFabio Estevam disable_clk_ipg: 1300b93edcddSFabio Estevam clk_disable(sdmac->sdma->clk_ipg); 1301b93edcddSFabio Estevam return ret; 13021ec1e82fSSascha Hauer } 13031ec1e82fSSascha Hauer 13041ec1e82fSSascha Hauer static void sdma_free_chan_resources(struct dma_chan *chan) 13051ec1e82fSSascha Hauer { 13061ec1e82fSSascha Hauer struct
sdma_channel *sdmac = to_sdma_chan(chan); 13071ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 13081ec1e82fSSascha Hauer 1309b8603d2aSLucas Stach sdma_disable_channel_async(chan); 1310b8603d2aSLucas Stach 1311b8603d2aSLucas Stach sdma_channel_synchronize(chan); 13121ec1e82fSSascha Hauer 13131ec1e82fSSascha Hauer if (sdmac->event_id0) 13141ec1e82fSSascha Hauer sdma_event_disable(sdmac, sdmac->event_id0); 13151ec1e82fSSascha Hauer if (sdmac->event_id1) 13161ec1e82fSSascha Hauer sdma_event_disable(sdmac, sdmac->event_id1); 13171ec1e82fSSascha Hauer 13181ec1e82fSSascha Hauer sdmac->event_id0 = 0; 13191ec1e82fSSascha Hauer sdmac->event_id1 = 0; 13201ec1e82fSSascha Hauer 13211ec1e82fSSascha Hauer sdma_set_channel_priority(sdmac, 0); 13221ec1e82fSSascha Hauer 13237560e3f3SSascha Hauer clk_disable(sdma->clk_ipg); 13247560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 13251ec1e82fSSascha Hauer } 13261ec1e82fSSascha Hauer 132721420841SRobin Gong static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, 132821420841SRobin Gong enum dma_transfer_direction direction, u32 bds) 132921420841SRobin Gong { 133021420841SRobin Gong struct sdma_desc *desc; 133121420841SRobin Gong 133221420841SRobin Gong desc = kzalloc((sizeof(*desc)), GFP_NOWAIT); 133321420841SRobin Gong if (!desc) 133421420841SRobin Gong goto err_out; 133521420841SRobin Gong 133621420841SRobin Gong sdmac->status = DMA_IN_PROGRESS; 133721420841SRobin Gong sdmac->direction = direction; 133821420841SRobin Gong sdmac->flags = 0; 133921420841SRobin Gong 134021420841SRobin Gong desc->chn_count = 0; 134121420841SRobin Gong desc->chn_real_count = 0; 134221420841SRobin Gong desc->buf_tail = 0; 134321420841SRobin Gong desc->buf_ptail = 0; 134421420841SRobin Gong desc->sdmac = sdmac; 134521420841SRobin Gong desc->num_bd = bds; 134621420841SRobin Gong 134721420841SRobin Gong if (sdma_alloc_bd(desc)) 134821420841SRobin Gong goto err_desc_out; 134921420841SRobin Gong 13500f06c027SRobin Gong /* No slave_config called in MEMCPY case, so do here */ 13510f06c027SRobin Gong if (direction == DMA_MEM_TO_MEM) 13520f06c027SRobin Gong sdma_config_ownership(sdmac, false, true, false); 13530f06c027SRobin Gong 135421420841SRobin Gong if (sdma_load_context(sdmac)) 135521420841SRobin Gong goto err_desc_out; 135621420841SRobin Gong 135721420841SRobin Gong return desc; 135821420841SRobin Gong 135921420841SRobin Gong err_desc_out: 136021420841SRobin Gong kfree(desc); 136121420841SRobin Gong err_out: 136221420841SRobin Gong return NULL; 136321420841SRobin Gong } 136421420841SRobin Gong 13650f06c027SRobin Gong static struct dma_async_tx_descriptor *sdma_prep_memcpy( 13660f06c027SRobin Gong struct dma_chan *chan, dma_addr_t dma_dst, 13670f06c027SRobin Gong dma_addr_t dma_src, size_t len, unsigned long flags) 13680f06c027SRobin Gong { 13690f06c027SRobin Gong struct sdma_channel *sdmac = to_sdma_chan(chan); 13700f06c027SRobin Gong struct sdma_engine *sdma = sdmac->sdma; 13710f06c027SRobin Gong int channel = sdmac->channel; 13720f06c027SRobin Gong size_t count; 13730f06c027SRobin Gong int i = 0, param; 13740f06c027SRobin Gong struct sdma_buffer_descriptor *bd; 13750f06c027SRobin Gong struct sdma_desc *desc; 13760f06c027SRobin Gong 13770f06c027SRobin Gong if (!chan || !len) 13780f06c027SRobin Gong return NULL; 13790f06c027SRobin Gong 13800f06c027SRobin Gong dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n", 13810f06c027SRobin Gong &dma_src, &dma_dst, len, channel); 13820f06c027SRobin Gong 13830f06c027SRobin Gong desc = 
sdma_transfer_init(sdmac, DMA_MEM_TO_MEM, 13840f06c027SRobin Gong len / SDMA_BD_MAX_CNT + 1); 13850f06c027SRobin Gong if (!desc) 13860f06c027SRobin Gong return NULL; 13870f06c027SRobin Gong 13880f06c027SRobin Gong do { 13890f06c027SRobin Gong count = min_t(size_t, len, SDMA_BD_MAX_CNT); 13900f06c027SRobin Gong bd = &desc->bd[i]; 13910f06c027SRobin Gong bd->buffer_addr = dma_src; 13920f06c027SRobin Gong bd->ext_buffer_addr = dma_dst; 13930f06c027SRobin Gong bd->mode.count = count; 13940f06c027SRobin Gong desc->chn_count += count; 13950f06c027SRobin Gong bd->mode.command = 0; 13960f06c027SRobin Gong 13970f06c027SRobin Gong dma_src += count; 13980f06c027SRobin Gong dma_dst += count; 13990f06c027SRobin Gong len -= count; 14000f06c027SRobin Gong i++; 14010f06c027SRobin Gong 14020f06c027SRobin Gong param = BD_DONE | BD_EXTD | BD_CONT; 14030f06c027SRobin Gong /* last bd */ 14040f06c027SRobin Gong if (!len) { 14050f06c027SRobin Gong param |= BD_INTR; 14060f06c027SRobin Gong param |= BD_LAST; 14070f06c027SRobin Gong param &= ~BD_CONT; 14080f06c027SRobin Gong } 14090f06c027SRobin Gong 14100f06c027SRobin Gong dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n", 14110f06c027SRobin Gong i, count, bd->buffer_addr, 14120f06c027SRobin Gong param & BD_WRAP ? "wrap" : "", 14130f06c027SRobin Gong param & BD_INTR ? " intr" : ""); 14140f06c027SRobin Gong 14150f06c027SRobin Gong bd->mode.status = param; 14160f06c027SRobin Gong } while (len); 14170f06c027SRobin Gong 14180f06c027SRobin Gong return vchan_tx_prep(&sdmac->vc, &desc->vd, flags); 14190f06c027SRobin Gong } 14200f06c027SRobin Gong 14211ec1e82fSSascha Hauer static struct dma_async_tx_descriptor *sdma_prep_slave_sg( 14221ec1e82fSSascha Hauer struct dma_chan *chan, struct scatterlist *sgl, 1423db8196dfSVinod Koul unsigned int sg_len, enum dma_transfer_direction direction, 1424185ecb5fSAlexandre Bounine unsigned long flags, void *context) 14251ec1e82fSSascha Hauer { 14261ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 14271ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 1428ad78b000SVinod Koul int i, count; 142923889c63SSascha Hauer int channel = sdmac->channel; 14301ec1e82fSSascha Hauer struct scatterlist *sg; 143157b772b8SRobin Gong struct sdma_desc *desc; 14321ec1e82fSSascha Hauer 1433107d0644SVinod Koul sdma_config_write(chan, &sdmac->slave_config, direction); 1434107d0644SVinod Koul 143521420841SRobin Gong desc = sdma_transfer_init(sdmac, direction, sg_len); 143657b772b8SRobin Gong if (!desc) 143757b772b8SRobin Gong goto err_out; 143857b772b8SRobin Gong 14391ec1e82fSSascha Hauer dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 14401ec1e82fSSascha Hauer sg_len, channel); 14411ec1e82fSSascha Hauer 14421ec1e82fSSascha Hauer for_each_sg(sgl, sg, sg_len, i) { 144376c33d27SSascha Hauer struct sdma_buffer_descriptor *bd = &desc->bd[i]; 14441ec1e82fSSascha Hauer int param; 14451ec1e82fSSascha Hauer 1446d2f5c276SAnatolij Gustschin bd->buffer_addr = sg->dma_address; 14471ec1e82fSSascha Hauer 1448fdaf9c4bSLars-Peter Clausen count = sg_dma_len(sg); 14491ec1e82fSSascha Hauer 14504a6b2e8aSRobin Gong if (count > SDMA_BD_MAX_CNT) { 14511ec1e82fSSascha Hauer dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", 14524a6b2e8aSRobin Gong channel, count, SDMA_BD_MAX_CNT); 145357b772b8SRobin Gong goto err_bd_out; 14541ec1e82fSSascha Hauer } 14551ec1e82fSSascha Hauer 14561ec1e82fSSascha Hauer bd->mode.count = count; 145776c33d27SSascha Hauer desc->chn_count += count; 14581ec1e82fSSascha 
Hauer 1459ad78b000SVinod Koul if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) 146057b772b8SRobin Gong goto err_bd_out; 14611fa81c27SSascha Hauer 14621fa81c27SSascha Hauer switch (sdmac->word_size) { 14631fa81c27SSascha Hauer case DMA_SLAVE_BUSWIDTH_4_BYTES: 14641ec1e82fSSascha Hauer bd->mode.command = 0; 14651fa81c27SSascha Hauer if (count & 3 || sg->dma_address & 3) 146657b772b8SRobin Gong goto err_bd_out; 14671fa81c27SSascha Hauer break; 14681fa81c27SSascha Hauer case DMA_SLAVE_BUSWIDTH_2_BYTES: 14691fa81c27SSascha Hauer bd->mode.command = 2; 14701fa81c27SSascha Hauer if (count & 1 || sg->dma_address & 1) 147157b772b8SRobin Gong goto err_bd_out; 14721fa81c27SSascha Hauer break; 14731fa81c27SSascha Hauer case DMA_SLAVE_BUSWIDTH_1_BYTE: 14741fa81c27SSascha Hauer bd->mode.command = 1; 14751fa81c27SSascha Hauer break; 14761fa81c27SSascha Hauer default: 147757b772b8SRobin Gong goto err_bd_out; 14781fa81c27SSascha Hauer } 14791ec1e82fSSascha Hauer 14801ec1e82fSSascha Hauer param = BD_DONE | BD_EXTD | BD_CONT; 14811ec1e82fSSascha Hauer 1482341b9419SShawn Guo if (i + 1 == sg_len) { 14831ec1e82fSSascha Hauer param |= BD_INTR; 1484341b9419SShawn Guo param |= BD_LAST; 1485341b9419SShawn Guo param &= ~BD_CONT; 14861ec1e82fSSascha Hauer } 14871ec1e82fSSascha Hauer 1488c3cc74b2SOlof Johansson dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", 1489c3cc74b2SOlof Johansson i, count, (u64)sg->dma_address, 14901ec1e82fSSascha Hauer param & BD_WRAP ? "wrap" : "", 14911ec1e82fSSascha Hauer param & BD_INTR ? " intr" : ""); 14921ec1e82fSSascha Hauer 14931ec1e82fSSascha Hauer bd->mode.status = param; 14941ec1e82fSSascha Hauer } 14951ec1e82fSSascha Hauer 149657b772b8SRobin Gong return vchan_tx_prep(&sdmac->vc, &desc->vd, flags); 149757b772b8SRobin Gong err_bd_out: 149857b772b8SRobin Gong sdma_free_bd(desc); 149957b772b8SRobin Gong kfree(desc); 15001ec1e82fSSascha Hauer err_out: 15014b2ce9ddSShawn Guo sdmac->status = DMA_ERROR; 15021ec1e82fSSascha Hauer return NULL; 15031ec1e82fSSascha Hauer } 15041ec1e82fSSascha Hauer 15051ec1e82fSSascha Hauer static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( 15061ec1e82fSSascha Hauer struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 1507185ecb5fSAlexandre Bounine size_t period_len, enum dma_transfer_direction direction, 150831c1e5a1SLaurent Pinchart unsigned long flags) 15091ec1e82fSSascha Hauer { 15101ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 15111ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 15121ec1e82fSSascha Hauer int num_periods = buf_len / period_len; 151323889c63SSascha Hauer int channel = sdmac->channel; 151421420841SRobin Gong int i = 0, buf = 0; 151557b772b8SRobin Gong struct sdma_desc *desc; 15161ec1e82fSSascha Hauer 15171ec1e82fSSascha Hauer dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); 15181ec1e82fSSascha Hauer 1519107d0644SVinod Koul sdma_config_write(chan, &sdmac->slave_config, direction); 1520107d0644SVinod Koul 152121420841SRobin Gong desc = sdma_transfer_init(sdmac, direction, num_periods); 152257b772b8SRobin Gong if (!desc) 152357b772b8SRobin Gong goto err_out; 152457b772b8SRobin Gong 152576c33d27SSascha Hauer desc->period_len = period_len; 15268e2e27c7SRichard Zhao 15271ec1e82fSSascha Hauer sdmac->flags |= IMX_DMA_SG_LOOP; 15281ec1e82fSSascha Hauer 15294a6b2e8aSRobin Gong if (period_len > SDMA_BD_MAX_CNT) { 1530ba6ab3b3SArvind Yadav dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n", 15314a6b2e8aSRobin Gong channel, period_len, 
SDMA_BD_MAX_CNT); 153257b772b8SRobin Gong goto err_bd_out; 15331ec1e82fSSascha Hauer } 15341ec1e82fSSascha Hauer 15351ec1e82fSSascha Hauer while (buf < buf_len) { 153676c33d27SSascha Hauer struct sdma_buffer_descriptor *bd = &desc->bd[i]; 15371ec1e82fSSascha Hauer int param; 15381ec1e82fSSascha Hauer 15391ec1e82fSSascha Hauer bd->buffer_addr = dma_addr; 15401ec1e82fSSascha Hauer 15411ec1e82fSSascha Hauer bd->mode.count = period_len; 15421ec1e82fSSascha Hauer 15431ec1e82fSSascha Hauer if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) 154457b772b8SRobin Gong goto err_bd_out; 15451ec1e82fSSascha Hauer if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) 15461ec1e82fSSascha Hauer bd->mode.command = 0; 15471ec1e82fSSascha Hauer else 15481ec1e82fSSascha Hauer bd->mode.command = sdmac->word_size; 15491ec1e82fSSascha Hauer 15501ec1e82fSSascha Hauer param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR; 15511ec1e82fSSascha Hauer if (i + 1 == num_periods) 15521ec1e82fSSascha Hauer param |= BD_WRAP; 15531ec1e82fSSascha Hauer 1554ba6ab3b3SArvind Yadav dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n", 1555c3cc74b2SOlof Johansson i, period_len, (u64)dma_addr, 15561ec1e82fSSascha Hauer param & BD_WRAP ? "wrap" : "", 15571ec1e82fSSascha Hauer param & BD_INTR ? " intr" : ""); 15581ec1e82fSSascha Hauer 15591ec1e82fSSascha Hauer bd->mode.status = param; 15601ec1e82fSSascha Hauer 15611ec1e82fSSascha Hauer dma_addr += period_len; 15621ec1e82fSSascha Hauer buf += period_len; 15631ec1e82fSSascha Hauer 15641ec1e82fSSascha Hauer i++; 15651ec1e82fSSascha Hauer } 15661ec1e82fSSascha Hauer 156757b772b8SRobin Gong return vchan_tx_prep(&sdmac->vc, &desc->vd, flags); 156857b772b8SRobin Gong err_bd_out: 156957b772b8SRobin Gong sdma_free_bd(desc); 157057b772b8SRobin Gong kfree(desc); 15711ec1e82fSSascha Hauer err_out: 15721ec1e82fSSascha Hauer sdmac->status = DMA_ERROR; 15731ec1e82fSSascha Hauer return NULL; 15741ec1e82fSSascha Hauer } 15751ec1e82fSSascha Hauer 1576107d0644SVinod Koul static int sdma_config_write(struct dma_chan *chan, 1577107d0644SVinod Koul struct dma_slave_config *dmaengine_cfg, 1578107d0644SVinod Koul enum dma_transfer_direction direction) 15791ec1e82fSSascha Hauer { 15801ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 15811ec1e82fSSascha Hauer 1582107d0644SVinod Koul if (direction == DMA_DEV_TO_MEM) { 15831ec1e82fSSascha Hauer sdmac->per_address = dmaengine_cfg->src_addr; 158494ac27a5SPhilippe Rétornaz sdmac->watermark_level = dmaengine_cfg->src_maxburst * 158594ac27a5SPhilippe Rétornaz dmaengine_cfg->src_addr_width; 15861ec1e82fSSascha Hauer sdmac->word_size = dmaengine_cfg->src_addr_width; 1587107d0644SVinod Koul } else if (direction == DMA_DEV_TO_DEV) { 15888391ecf4SShengjiu Wang sdmac->per_address2 = dmaengine_cfg->src_addr; 15898391ecf4SShengjiu Wang sdmac->per_address = dmaengine_cfg->dst_addr; 15908391ecf4SShengjiu Wang sdmac->watermark_level = dmaengine_cfg->src_maxburst & 15918391ecf4SShengjiu Wang SDMA_WATERMARK_LEVEL_LWML; 15928391ecf4SShengjiu Wang sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) & 15938391ecf4SShengjiu Wang SDMA_WATERMARK_LEVEL_HWML; 15948391ecf4SShengjiu Wang sdmac->word_size = dmaengine_cfg->dst_addr_width; 15951ec1e82fSSascha Hauer } else { 15961ec1e82fSSascha Hauer sdmac->per_address = dmaengine_cfg->dst_addr; 159794ac27a5SPhilippe Rétornaz sdmac->watermark_level = dmaengine_cfg->dst_maxburst * 159894ac27a5SPhilippe Rétornaz dmaengine_cfg->dst_addr_width; 15991ec1e82fSSascha Hauer sdmac->word_size = dmaengine_cfg->dst_addr_width; 
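/*
 * For the mem-to-dev case the watermark programmed above is a byte count:
 * the destination maxburst multiplied by the destination register width.
 */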
16001ec1e82fSSascha Hauer } 1601107d0644SVinod Koul sdmac->direction = direction; 16027b350ab0SMaxime Ripard return sdma_config_channel(chan); 16031ec1e82fSSascha Hauer } 16041ec1e82fSSascha Hauer 1605107d0644SVinod Koul static int sdma_config(struct dma_chan *chan, 1606107d0644SVinod Koul struct dma_slave_config *dmaengine_cfg) 1607107d0644SVinod Koul { 1608107d0644SVinod Koul struct sdma_channel *sdmac = to_sdma_chan(chan); 1609107d0644SVinod Koul 1610107d0644SVinod Koul memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); 1611107d0644SVinod Koul 1612107d0644SVinod Koul /* Set ENBLn earlier to make sure dma request triggered after that */ 1613107d0644SVinod Koul if (sdmac->event_id0) { 1614107d0644SVinod Koul if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) 1615107d0644SVinod Koul return -EINVAL; 1616107d0644SVinod Koul sdma_event_enable(sdmac, sdmac->event_id0); 1617107d0644SVinod Koul } 1618107d0644SVinod Koul 1619107d0644SVinod Koul if (sdmac->event_id1) { 1620107d0644SVinod Koul if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) 1621107d0644SVinod Koul return -EINVAL; 1622107d0644SVinod Koul sdma_event_enable(sdmac, sdmac->event_id1); 1623107d0644SVinod Koul } 1624107d0644SVinod Koul 1625107d0644SVinod Koul return 0; 1626107d0644SVinod Koul } 1627107d0644SVinod Koul 16281ec1e82fSSascha Hauer static enum dma_status sdma_tx_status(struct dma_chan *chan, 16291ec1e82fSSascha Hauer dma_cookie_t cookie, 16301ec1e82fSSascha Hauer struct dma_tx_state *txstate) 16311ec1e82fSSascha Hauer { 16321ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 163357b772b8SRobin Gong struct sdma_desc *desc; 1634d1a792f3SRussell King - ARM Linux u32 residue; 163557b772b8SRobin Gong struct virt_dma_desc *vd; 163657b772b8SRobin Gong enum dma_status ret; 163757b772b8SRobin Gong unsigned long flags; 1638d1a792f3SRussell King - ARM Linux 163957b772b8SRobin Gong ret = dma_cookie_status(chan, cookie, txstate); 164057b772b8SRobin Gong if (ret == DMA_COMPLETE || !txstate) 164157b772b8SRobin Gong return ret; 164257b772b8SRobin Gong 164357b772b8SRobin Gong spin_lock_irqsave(&sdmac->vc.lock, flags); 164457b772b8SRobin Gong vd = vchan_find_desc(&sdmac->vc, cookie); 164557b772b8SRobin Gong if (vd) { 164657b772b8SRobin Gong desc = to_sdma_desc(&vd->tx); 1647d1a792f3SRussell King - ARM Linux if (sdmac->flags & IMX_DMA_SG_LOOP) 164876c33d27SSascha Hauer residue = (desc->num_bd - desc->buf_ptail) * 164976c33d27SSascha Hauer desc->period_len - desc->chn_real_count; 1650d1a792f3SRussell King - ARM Linux else 165176c33d27SSascha Hauer residue = desc->chn_count - desc->chn_real_count; 165257b772b8SRobin Gong } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) { 165357b772b8SRobin Gong residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count; 165457b772b8SRobin Gong } else { 165557b772b8SRobin Gong residue = 0; 165657b772b8SRobin Gong } 165757b772b8SRobin Gong spin_unlock_irqrestore(&sdmac->vc.lock, flags); 16581ec1e82fSSascha Hauer 1659e8e3a790SAndy Shevchenko dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 1660d1a792f3SRussell King - ARM Linux residue); 16611ec1e82fSSascha Hauer 16628a965911SShawn Guo return sdmac->status; 16631ec1e82fSSascha Hauer } 16641ec1e82fSSascha Hauer 16651ec1e82fSSascha Hauer static void sdma_issue_pending(struct dma_chan *chan) 16661ec1e82fSSascha Hauer { 16672b4f130eSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 166857b772b8SRobin Gong unsigned long flags; 16692b4f130eSSascha Hauer 167057b772b8SRobin Gong 
spin_lock_irqsave(&sdmac->vc.lock, flags); 167157b772b8SRobin Gong if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc) 167257b772b8SRobin Gong sdma_start_desc(sdmac); 167357b772b8SRobin Gong spin_unlock_irqrestore(&sdmac->vc.lock, flags); 16741ec1e82fSSascha Hauer } 16751ec1e82fSSascha Hauer 16765b28aa31SSascha Hauer #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 1677cd72b846SNicolin Chen #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 1678a572460bSFabio Estevam #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41 1679b7d2648aSFabio Estevam #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 42 16805b28aa31SSascha Hauer 16815b28aa31SSascha Hauer static void sdma_add_scripts(struct sdma_engine *sdma, 16825b28aa31SSascha Hauer const struct sdma_script_start_addrs *addr) 16835b28aa31SSascha Hauer { 16845b28aa31SSascha Hauer s32 *addr_arr = (u32 *)addr; 16855b28aa31SSascha Hauer s32 *saddr_arr = (u32 *)sdma->script_addrs; 16865b28aa31SSascha Hauer int i; 16875b28aa31SSascha Hauer 168870dabaedSNicolin Chen /* use the default firmware in ROM if missing external firmware */ 168970dabaedSNicolin Chen if (!sdma->script_number) 169070dabaedSNicolin Chen sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; 169170dabaedSNicolin Chen 1692cd72b846SNicolin Chen for (i = 0; i < sdma->script_number; i++) 16935b28aa31SSascha Hauer if (addr_arr[i] > 0) 16945b28aa31SSascha Hauer saddr_arr[i] = addr_arr[i]; 16955b28aa31SSascha Hauer } 16965b28aa31SSascha Hauer 16977b4b88e0SSascha Hauer static void sdma_load_firmware(const struct firmware *fw, void *context) 16985b28aa31SSascha Hauer { 16997b4b88e0SSascha Hauer struct sdma_engine *sdma = context; 17005b28aa31SSascha Hauer const struct sdma_firmware_header *header; 17015b28aa31SSascha Hauer const struct sdma_script_start_addrs *addr; 17025b28aa31SSascha Hauer unsigned short *ram_code; 17035b28aa31SSascha Hauer 17047b4b88e0SSascha Hauer if (!fw) { 17050f927a11SSascha Hauer dev_info(sdma->dev, "external firmware not found, using ROM firmware\n"); 17060f927a11SSascha Hauer /* In this case we just use the ROM firmware. 
*/ 17077b4b88e0SSascha Hauer return; 17087b4b88e0SSascha Hauer } 17095b28aa31SSascha Hauer 17105b28aa31SSascha Hauer if (fw->size < sizeof(*header)) 17115b28aa31SSascha Hauer goto err_firmware; 17125b28aa31SSascha Hauer 17135b28aa31SSascha Hauer header = (struct sdma_firmware_header *)fw->data; 17145b28aa31SSascha Hauer 17155b28aa31SSascha Hauer if (header->magic != SDMA_FIRMWARE_MAGIC) 17165b28aa31SSascha Hauer goto err_firmware; 17175b28aa31SSascha Hauer if (header->ram_code_start + header->ram_code_size > fw->size) 17185b28aa31SSascha Hauer goto err_firmware; 1719cd72b846SNicolin Chen switch (header->version_major) { 1720cd72b846SNicolin Chen case 1: 1721cd72b846SNicolin Chen sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; 1722cd72b846SNicolin Chen break; 1723cd72b846SNicolin Chen case 2: 1724cd72b846SNicolin Chen sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2; 1725cd72b846SNicolin Chen break; 1726a572460bSFabio Estevam case 3: 1727a572460bSFabio Estevam sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3; 1728a572460bSFabio Estevam break; 1729b7d2648aSFabio Estevam case 4: 1730b7d2648aSFabio Estevam sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4; 1731b7d2648aSFabio Estevam break; 1732cd72b846SNicolin Chen default: 1733cd72b846SNicolin Chen dev_err(sdma->dev, "unknown firmware version\n"); 1734cd72b846SNicolin Chen goto err_firmware; 1735cd72b846SNicolin Chen } 17365b28aa31SSascha Hauer 17375b28aa31SSascha Hauer addr = (void *)header + header->script_addrs_start; 17385b28aa31SSascha Hauer ram_code = (void *)header + header->ram_code_start; 17395b28aa31SSascha Hauer 17407560e3f3SSascha Hauer clk_enable(sdma->clk_ipg); 17417560e3f3SSascha Hauer clk_enable(sdma->clk_ahb); 17425b28aa31SSascha Hauer /* download the RAM image for SDMA */ 17435b28aa31SSascha Hauer sdma_load_script(sdma, ram_code, 17445b28aa31SSascha Hauer header->ram_code_size, 17456866fd3bSSascha Hauer addr->ram_code_start_addr); 17467560e3f3SSascha Hauer clk_disable(sdma->clk_ipg); 17477560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 17485b28aa31SSascha Hauer 17495b28aa31SSascha Hauer sdma_add_scripts(sdma, addr); 17505b28aa31SSascha Hauer 17515b28aa31SSascha Hauer dev_info(sdma->dev, "loaded firmware %d.%d\n", 17525b28aa31SSascha Hauer header->version_major, 17535b28aa31SSascha Hauer header->version_minor); 17545b28aa31SSascha Hauer 17555b28aa31SSascha Hauer err_firmware: 17565b28aa31SSascha Hauer release_firmware(fw); 17577b4b88e0SSascha Hauer } 17587b4b88e0SSascha Hauer 1759d078cd1bSZidan Wang #define EVENT_REMAP_CELLS 3 1760d078cd1bSZidan Wang 176129f493daSJason Liu static int sdma_event_remap(struct sdma_engine *sdma) 1762d078cd1bSZidan Wang { 1763d078cd1bSZidan Wang struct device_node *np = sdma->dev->of_node; 1764d078cd1bSZidan Wang struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); 1765d078cd1bSZidan Wang struct property *event_remap; 1766d078cd1bSZidan Wang struct regmap *gpr; 1767d078cd1bSZidan Wang char propname[] = "fsl,sdma-event-remap"; 1768d078cd1bSZidan Wang u32 reg, val, shift, num_map, i; 1769d078cd1bSZidan Wang int ret = 0; 1770d078cd1bSZidan Wang 1771d078cd1bSZidan Wang if (IS_ERR(np) || IS_ERR(gpr_np)) 1772d078cd1bSZidan Wang goto out; 1773d078cd1bSZidan Wang 1774d078cd1bSZidan Wang event_remap = of_find_property(np, propname, NULL); 1775d078cd1bSZidan Wang num_map = event_remap ? 
(event_remap->length / sizeof(u32)) : 0; 1776d078cd1bSZidan Wang if (!num_map) { 1777ce078af7SFabio Estevam dev_dbg(sdma->dev, "no event needs to be remapped\n"); 1778d078cd1bSZidan Wang goto out; 1779d078cd1bSZidan Wang } else if (num_map % EVENT_REMAP_CELLS) { 1780d078cd1bSZidan Wang dev_err(sdma->dev, "the property %s must modulo %d\n", 1781d078cd1bSZidan Wang propname, EVENT_REMAP_CELLS); 1782d078cd1bSZidan Wang ret = -EINVAL; 1783d078cd1bSZidan Wang goto out; 1784d078cd1bSZidan Wang } 1785d078cd1bSZidan Wang 1786d078cd1bSZidan Wang gpr = syscon_node_to_regmap(gpr_np); 1787d078cd1bSZidan Wang if (IS_ERR(gpr)) { 1788d078cd1bSZidan Wang dev_err(sdma->dev, "failed to get gpr regmap\n"); 1789d078cd1bSZidan Wang ret = PTR_ERR(gpr); 1790d078cd1bSZidan Wang goto out; 1791d078cd1bSZidan Wang } 1792d078cd1bSZidan Wang 1793d078cd1bSZidan Wang for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) { 1794d078cd1bSZidan Wang ret = of_property_read_u32_index(np, propname, i, ®); 1795d078cd1bSZidan Wang if (ret) { 1796d078cd1bSZidan Wang dev_err(sdma->dev, "failed to read property %s index %d\n", 1797d078cd1bSZidan Wang propname, i); 1798d078cd1bSZidan Wang goto out; 1799d078cd1bSZidan Wang } 1800d078cd1bSZidan Wang 1801d078cd1bSZidan Wang ret = of_property_read_u32_index(np, propname, i + 1, &shift); 1802d078cd1bSZidan Wang if (ret) { 1803d078cd1bSZidan Wang dev_err(sdma->dev, "failed to read property %s index %d\n", 1804d078cd1bSZidan Wang propname, i + 1); 1805d078cd1bSZidan Wang goto out; 1806d078cd1bSZidan Wang } 1807d078cd1bSZidan Wang 1808d078cd1bSZidan Wang ret = of_property_read_u32_index(np, propname, i + 2, &val); 1809d078cd1bSZidan Wang if (ret) { 1810d078cd1bSZidan Wang dev_err(sdma->dev, "failed to read property %s index %d\n", 1811d078cd1bSZidan Wang propname, i + 2); 1812d078cd1bSZidan Wang goto out; 1813d078cd1bSZidan Wang } 1814d078cd1bSZidan Wang 1815d078cd1bSZidan Wang regmap_update_bits(gpr, reg, BIT(shift), val << shift); 1816d078cd1bSZidan Wang } 1817d078cd1bSZidan Wang 1818d078cd1bSZidan Wang out: 1819d078cd1bSZidan Wang if (!IS_ERR(gpr_np)) 1820d078cd1bSZidan Wang of_node_put(gpr_np); 1821d078cd1bSZidan Wang 1822d078cd1bSZidan Wang return ret; 1823d078cd1bSZidan Wang } 1824d078cd1bSZidan Wang 1825fe6cf289SArnd Bergmann static int sdma_get_firmware(struct sdma_engine *sdma, 18267b4b88e0SSascha Hauer const char *fw_name) 18277b4b88e0SSascha Hauer { 18287b4b88e0SSascha Hauer int ret; 18297b4b88e0SSascha Hauer 18307b4b88e0SSascha Hauer ret = request_firmware_nowait(THIS_MODULE, 18317b4b88e0SSascha Hauer FW_ACTION_HOTPLUG, fw_name, sdma->dev, 18327b4b88e0SSascha Hauer GFP_KERNEL, sdma, sdma_load_firmware); 18335b28aa31SSascha Hauer 18345b28aa31SSascha Hauer return ret; 18355b28aa31SSascha Hauer } 18365b28aa31SSascha Hauer 183719bfc772SJingoo Han static int sdma_init(struct sdma_engine *sdma) 18381ec1e82fSSascha Hauer { 18391ec1e82fSSascha Hauer int i, ret; 18401ec1e82fSSascha Hauer dma_addr_t ccb_phys; 18411ec1e82fSSascha Hauer 1842b93edcddSFabio Estevam ret = clk_enable(sdma->clk_ipg); 1843b93edcddSFabio Estevam if (ret) 1844b93edcddSFabio Estevam return ret; 1845b93edcddSFabio Estevam ret = clk_enable(sdma->clk_ahb); 1846b93edcddSFabio Estevam if (ret) 1847b93edcddSFabio Estevam goto disable_clk_ipg; 18481ec1e82fSSascha Hauer 18491ec1e82fSSascha Hauer /* Be sure SDMA has not started yet */ 1850c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); 18511ec1e82fSSascha Hauer 18521ec1e82fSSascha Hauer sdma->channel_control = dma_alloc_coherent(NULL, 18531ec1e82fSSascha Hauer 
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + 18541ec1e82fSSascha Hauer sizeof(struct sdma_context_data), 18551ec1e82fSSascha Hauer &ccb_phys, GFP_KERNEL); 18561ec1e82fSSascha Hauer 18571ec1e82fSSascha Hauer if (!sdma->channel_control) { 18581ec1e82fSSascha Hauer ret = -ENOMEM; 18591ec1e82fSSascha Hauer goto err_dma_alloc; 18601ec1e82fSSascha Hauer } 18611ec1e82fSSascha Hauer 18621ec1e82fSSascha Hauer sdma->context = (void *)sdma->channel_control + 18631ec1e82fSSascha Hauer MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 18641ec1e82fSSascha Hauer sdma->context_phys = ccb_phys + 18651ec1e82fSSascha Hauer MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 18661ec1e82fSSascha Hauer 18671ec1e82fSSascha Hauer /* Zero-out the CCB structures array just allocated */ 18681ec1e82fSSascha Hauer memset(sdma->channel_control, 0, 18691ec1e82fSSascha Hauer MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)); 18701ec1e82fSSascha Hauer 18711ec1e82fSSascha Hauer /* disable all channels */ 187217bba72fSSascha Hauer for (i = 0; i < sdma->drvdata->num_events; i++) 1873c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i)); 18741ec1e82fSSascha Hauer 18751ec1e82fSSascha Hauer /* All channels have priority 0 */ 18761ec1e82fSSascha Hauer for (i = 0; i < MAX_DMA_CHANNELS; i++) 1877c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); 18781ec1e82fSSascha Hauer 187957b772b8SRobin Gong ret = sdma_request_channel0(sdma); 18801ec1e82fSSascha Hauer if (ret) 18811ec1e82fSSascha Hauer goto err_dma_alloc; 18821ec1e82fSSascha Hauer 18831ec1e82fSSascha Hauer sdma_config_ownership(&sdma->channel[0], false, true, false); 18841ec1e82fSSascha Hauer 18851ec1e82fSSascha Hauer /* Set Command Channel (Channel Zero) */ 1886c4b56857SRichard Zhao writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); 18871ec1e82fSSascha Hauer 18881ec1e82fSSascha Hauer /* Set bits of CONFIG register but with static context switching */ 18891ec1e82fSSascha Hauer /* FIXME: Check whether to set ACR bit depending on clock ratios */ 1890c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); 18911ec1e82fSSascha Hauer 1892c4b56857SRichard Zhao writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); 18931ec1e82fSSascha Hauer 18941ec1e82fSSascha Hauer /* Initializes channel's priorities */ 18951ec1e82fSSascha Hauer sdma_set_channel_priority(&sdma->channel[0], 7); 18961ec1e82fSSascha Hauer 18977560e3f3SSascha Hauer clk_disable(sdma->clk_ipg); 18987560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 18991ec1e82fSSascha Hauer 19001ec1e82fSSascha Hauer return 0; 19011ec1e82fSSascha Hauer 19021ec1e82fSSascha Hauer err_dma_alloc: 19037560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 1904b93edcddSFabio Estevam disable_clk_ipg: 1905b93edcddSFabio Estevam clk_disable(sdma->clk_ipg); 19061ec1e82fSSascha Hauer dev_err(sdma->dev, "initialisation failed with %d\n", ret); 19071ec1e82fSSascha Hauer return ret; 19081ec1e82fSSascha Hauer } 19091ec1e82fSSascha Hauer 19109479e17cSShawn Guo static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) 19119479e17cSShawn Guo { 19120b351865SNicolin Chen struct sdma_channel *sdmac = to_sdma_chan(chan); 19139479e17cSShawn Guo struct imx_dma_data *data = fn_param; 19149479e17cSShawn Guo 19159479e17cSShawn Guo if (!imx_dma_is_general_purpose(chan)) 19169479e17cSShawn Guo return false; 19179479e17cSShawn Guo 19180b351865SNicolin Chen sdmac->data = *data; 19190b351865SNicolin Chen chan->private = &sdmac->data; 19209479e17cSShawn Guo 19219479e17cSShawn Guo 
return true; 19229479e17cSShawn Guo } 19239479e17cSShawn Guo 19249479e17cSShawn Guo static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, 19259479e17cSShawn Guo struct of_dma *ofdma) 19269479e17cSShawn Guo { 19279479e17cSShawn Guo struct sdma_engine *sdma = ofdma->of_dma_data; 19289479e17cSShawn Guo dma_cap_mask_t mask = sdma->dma_device.cap_mask; 19299479e17cSShawn Guo struct imx_dma_data data; 19309479e17cSShawn Guo 19319479e17cSShawn Guo if (dma_spec->args_count != 3) 19329479e17cSShawn Guo return NULL; 19339479e17cSShawn Guo 19349479e17cSShawn Guo data.dma_request = dma_spec->args[0]; 19359479e17cSShawn Guo data.peripheral_type = dma_spec->args[1]; 19369479e17cSShawn Guo data.priority = dma_spec->args[2]; 19378391ecf4SShengjiu Wang /* 19388391ecf4SShengjiu Wang * init dma_request2 to zero, which is not used by the dts. 19398391ecf4SShengjiu Wang * For P2P, dma_request2 is init from dma_request_channel(), 19408391ecf4SShengjiu Wang * chan->private will point to the imx_dma_data, and in 19418391ecf4SShengjiu Wang * device_alloc_chan_resources(), imx_dma_data.dma_request2 will 19428391ecf4SShengjiu Wang * be set to sdmac->event_id1. 19438391ecf4SShengjiu Wang */ 19448391ecf4SShengjiu Wang data.dma_request2 = 0; 19459479e17cSShawn Guo 19469479e17cSShawn Guo return dma_request_channel(mask, sdma_filter_fn, &data); 19479479e17cSShawn Guo } 19489479e17cSShawn Guo 1949e34b731fSMark Brown static int sdma_probe(struct platform_device *pdev) 19501ec1e82fSSascha Hauer { 1951580975d7SShawn Guo const struct of_device_id *of_id = 1952580975d7SShawn Guo of_match_device(sdma_dt_ids, &pdev->dev); 1953580975d7SShawn Guo struct device_node *np = pdev->dev.of_node; 19548391ecf4SShengjiu Wang struct device_node *spba_bus; 1955580975d7SShawn Guo const char *fw_name; 19561ec1e82fSSascha Hauer int ret; 19571ec1e82fSSascha Hauer int irq; 19581ec1e82fSSascha Hauer struct resource *iores; 19598391ecf4SShengjiu Wang struct resource spba_res; 1960d4adcc01SJingoo Han struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev); 19611ec1e82fSSascha Hauer int i; 19621ec1e82fSSascha Hauer struct sdma_engine *sdma; 196336e2f21aSSascha Hauer s32 *saddr_arr; 196417bba72fSSascha Hauer const struct sdma_driver_data *drvdata = NULL; 196517bba72fSSascha Hauer 196617bba72fSSascha Hauer if (of_id) 196717bba72fSSascha Hauer drvdata = of_id->data; 196817bba72fSSascha Hauer else if (pdev->id_entry) 196917bba72fSSascha Hauer drvdata = (void *)pdev->id_entry->driver_data; 197017bba72fSSascha Hauer 197117bba72fSSascha Hauer if (!drvdata) { 197217bba72fSSascha Hauer dev_err(&pdev->dev, "unable to find driver data\n"); 197317bba72fSSascha Hauer return -EINVAL; 197417bba72fSSascha Hauer } 19751ec1e82fSSascha Hauer 197642536b9fSPhilippe Retornaz ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 197742536b9fSPhilippe Retornaz if (ret) 197842536b9fSPhilippe Retornaz return ret; 197942536b9fSPhilippe Retornaz 19807f24e0eeSFabio Estevam sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL); 19811ec1e82fSSascha Hauer if (!sdma) 19821ec1e82fSSascha Hauer return -ENOMEM; 19831ec1e82fSSascha Hauer 19842ccaef05SRichard Zhao spin_lock_init(&sdma->channel_0_lock); 198573eab978SSascha Hauer 19861ec1e82fSSascha Hauer sdma->dev = &pdev->dev; 198717bba72fSSascha Hauer sdma->drvdata = drvdata; 19881ec1e82fSSascha Hauer 19891ec1e82fSSascha Hauer irq = platform_get_irq(pdev, 0); 19907f24e0eeSFabio Estevam if (irq < 0) 199163c72e02SFabio Estevam return irq; 19921ec1e82fSSascha Hauer 19937f24e0eeSFabio Estevam iores = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); 19947f24e0eeSFabio Estevam sdma->regs = devm_ioremap_resource(&pdev->dev, iores); 19957f24e0eeSFabio Estevam if (IS_ERR(sdma->regs)) 19967f24e0eeSFabio Estevam return PTR_ERR(sdma->regs); 19971ec1e82fSSascha Hauer 19987560e3f3SSascha Hauer sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 19997f24e0eeSFabio Estevam if (IS_ERR(sdma->clk_ipg)) 20007f24e0eeSFabio Estevam return PTR_ERR(sdma->clk_ipg); 20011ec1e82fSSascha Hauer 20027560e3f3SSascha Hauer sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 20037f24e0eeSFabio Estevam if (IS_ERR(sdma->clk_ahb)) 20047f24e0eeSFabio Estevam return PTR_ERR(sdma->clk_ahb); 20057560e3f3SSascha Hauer 2006fb9caf37SArvind Yadav ret = clk_prepare(sdma->clk_ipg); 2007fb9caf37SArvind Yadav if (ret) 2008fb9caf37SArvind Yadav return ret; 2009fb9caf37SArvind Yadav 2010fb9caf37SArvind Yadav ret = clk_prepare(sdma->clk_ahb); 2011fb9caf37SArvind Yadav if (ret) 2012fb9caf37SArvind Yadav goto err_clk; 20137560e3f3SSascha Hauer 20147f24e0eeSFabio Estevam ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma", 20157f24e0eeSFabio Estevam sdma); 20161ec1e82fSSascha Hauer if (ret) 2017fb9caf37SArvind Yadav goto err_irq; 20181ec1e82fSSascha Hauer 20195bb9dbb5SVinod Koul sdma->irq = irq; 20205bb9dbb5SVinod Koul 20215b28aa31SSascha Hauer sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); 2022fb9caf37SArvind Yadav if (!sdma->script_addrs) { 2023fb9caf37SArvind Yadav ret = -ENOMEM; 2024fb9caf37SArvind Yadav goto err_irq; 2025fb9caf37SArvind Yadav } 20261ec1e82fSSascha Hauer 202736e2f21aSSascha Hauer /* initially no scripts available */ 202836e2f21aSSascha Hauer saddr_arr = (s32 *)sdma->script_addrs; 202936e2f21aSSascha Hauer for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) 203036e2f21aSSascha Hauer saddr_arr[i] = -EINVAL; 203136e2f21aSSascha Hauer 20327214a8b1SSascha Hauer dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); 20337214a8b1SSascha Hauer dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); 20340f06c027SRobin Gong dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask); 20357214a8b1SSascha Hauer 20361ec1e82fSSascha Hauer INIT_LIST_HEAD(&sdma->dma_device.channels); 20371ec1e82fSSascha Hauer /* Initialize channel parameters */ 20381ec1e82fSSascha Hauer for (i = 0; i < MAX_DMA_CHANNELS; i++) { 20391ec1e82fSSascha Hauer struct sdma_channel *sdmac = &sdma->channel[i]; 20401ec1e82fSSascha Hauer 20411ec1e82fSSascha Hauer sdmac->sdma = sdma; 20421ec1e82fSSascha Hauer 20431ec1e82fSSascha Hauer sdmac->channel = i; 204457b772b8SRobin Gong sdmac->vc.desc_free = sdma_desc_free; 2045b8603d2aSLucas Stach INIT_WORK(&sdmac->terminate_worker, 2046b8603d2aSLucas Stach sdma_channel_terminate_work); 204723889c63SSascha Hauer /* 204823889c63SSascha Hauer * Add the channel to the DMAC list. Do not add channel 0 though 204923889c63SSascha Hauer * because we need it internally in the SDMA driver. This also means 205023889c63SSascha Hauer * that channel 0 in dmaengine counting matches sdma channel 1. 
205123889c63SSascha Hauer */ 205223889c63SSascha Hauer if (i) 205357b772b8SRobin Gong vchan_init(&sdmac->vc, &sdma->dma_device); 20541ec1e82fSSascha Hauer } 20551ec1e82fSSascha Hauer 20565b28aa31SSascha Hauer ret = sdma_init(sdma); 20571ec1e82fSSascha Hauer if (ret) 20581ec1e82fSSascha Hauer goto err_init; 20591ec1e82fSSascha Hauer 2060d078cd1bSZidan Wang ret = sdma_event_remap(sdma); 2061d078cd1bSZidan Wang if (ret) 2062d078cd1bSZidan Wang goto err_init; 2063d078cd1bSZidan Wang 2064dcfec3c0SSascha Hauer if (sdma->drvdata->script_addrs) 2065dcfec3c0SSascha Hauer sdma_add_scripts(sdma, sdma->drvdata->script_addrs); 2066580975d7SShawn Guo if (pdata && pdata->script_addrs) 20675b28aa31SSascha Hauer sdma_add_scripts(sdma, pdata->script_addrs); 20685b28aa31SSascha Hauer 2069580975d7SShawn Guo if (pdata) { 20706d0d7e2dSFabio Estevam ret = sdma_get_firmware(sdma, pdata->fw_name); 20716d0d7e2dSFabio Estevam if (ret) 2072ad1122e5SFabio Estevam dev_warn(&pdev->dev, "failed to get firmware from platform data\n"); 2073580975d7SShawn Guo } else { 2074580975d7SShawn Guo /* 2075580975d7SShawn Guo * Because that device tree does not encode ROM script address, 2076580975d7SShawn Guo * the RAM script in firmware is mandatory for device tree 2077580975d7SShawn Guo * probe, otherwise it fails. 2078580975d7SShawn Guo */ 2079580975d7SShawn Guo ret = of_property_read_string(np, "fsl,sdma-ram-script-name", 2080580975d7SShawn Guo &fw_name); 20816602b0ddSFabio Estevam if (ret) 2082ad1122e5SFabio Estevam dev_warn(&pdev->dev, "failed to get firmware name\n"); 20836602b0ddSFabio Estevam else { 2084580975d7SShawn Guo ret = sdma_get_firmware(sdma, fw_name); 20856602b0ddSFabio Estevam if (ret) 2086ad1122e5SFabio Estevam dev_warn(&pdev->dev, "failed to get firmware from device tree\n"); 2087580975d7SShawn Guo } 2088580975d7SShawn Guo } 20895b28aa31SSascha Hauer 20901ec1e82fSSascha Hauer sdma->dma_device.dev = &pdev->dev; 20911ec1e82fSSascha Hauer 20921ec1e82fSSascha Hauer sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; 20931ec1e82fSSascha Hauer sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources; 20941ec1e82fSSascha Hauer sdma->dma_device.device_tx_status = sdma_tx_status; 20951ec1e82fSSascha Hauer sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; 20961ec1e82fSSascha Hauer sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; 20977b350ab0SMaxime Ripard sdma->dma_device.device_config = sdma_config; 2098b8603d2aSLucas Stach sdma->dma_device.device_terminate_all = sdma_disable_channel_async; 2099b8603d2aSLucas Stach sdma->dma_device.device_synchronize = sdma_channel_synchronize; 2100f9d4a398SNicolin Chen sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; 2101f9d4a398SNicolin Chen sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; 2102f9d4a398SNicolin Chen sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; 21036f3125ceSLucas Stach sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 21040f06c027SRobin Gong sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy; 21051ec1e82fSSascha Hauer sdma->dma_device.device_issue_pending = sdma_issue_pending; 2106b9b3f82fSSascha Hauer sdma->dma_device.dev->dma_parms = &sdma->dma_parms; 21074a6b2e8aSRobin Gong dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT); 21081ec1e82fSSascha Hauer 210923e11811SVignesh Raman platform_set_drvdata(pdev, sdma); 211023e11811SVignesh Raman 21111ec1e82fSSascha Hauer ret = dma_async_device_register(&sdma->dma_device); 21121ec1e82fSSascha Hauer if (ret) { 
21131ec1e82fSSascha Hauer dev_err(&pdev->dev, "unable to register\n"); 21141ec1e82fSSascha Hauer goto err_init; 21151ec1e82fSSascha Hauer } 21161ec1e82fSSascha Hauer 21179479e17cSShawn Guo if (np) { 21189479e17cSShawn Guo ret = of_dma_controller_register(np, sdma_xlate, sdma); 21199479e17cSShawn Guo if (ret) { 21209479e17cSShawn Guo dev_err(&pdev->dev, "failed to register controller\n"); 21219479e17cSShawn Guo goto err_register; 21229479e17cSShawn Guo } 21238391ecf4SShengjiu Wang 21248391ecf4SShengjiu Wang spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus"); 21258391ecf4SShengjiu Wang ret = of_address_to_resource(spba_bus, 0, &spba_res); 21268391ecf4SShengjiu Wang if (!ret) { 21278391ecf4SShengjiu Wang sdma->spba_start_addr = spba_res.start; 21288391ecf4SShengjiu Wang sdma->spba_end_addr = spba_res.end; 21298391ecf4SShengjiu Wang } 21308391ecf4SShengjiu Wang of_node_put(spba_bus); 21319479e17cSShawn Guo } 21329479e17cSShawn Guo 21331ec1e82fSSascha Hauer return 0; 21341ec1e82fSSascha Hauer 21359479e17cSShawn Guo err_register: 21369479e17cSShawn Guo dma_async_device_unregister(&sdma->dma_device); 21371ec1e82fSSascha Hauer err_init: 21381ec1e82fSSascha Hauer kfree(sdma->script_addrs); 2139fb9caf37SArvind Yadav err_irq: 2140fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ahb); 2141fb9caf37SArvind Yadav err_clk: 2142fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ipg); 2143939fd4f0SShawn Guo return ret; 21441ec1e82fSSascha Hauer } 21451ec1e82fSSascha Hauer 21461d1bbd30SMaxin B. John static int sdma_remove(struct platform_device *pdev) 21471ec1e82fSSascha Hauer { 214823e11811SVignesh Raman struct sdma_engine *sdma = platform_get_drvdata(pdev); 2149c12fe497SVignesh Raman int i; 215023e11811SVignesh Raman 21515bb9dbb5SVinod Koul devm_free_irq(&pdev->dev, sdma->irq, sdma); 215223e11811SVignesh Raman dma_async_device_unregister(&sdma->dma_device); 215323e11811SVignesh Raman kfree(sdma->script_addrs); 2154fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ahb); 2155fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ipg); 2156c12fe497SVignesh Raman /* Kill the tasklet */ 2157c12fe497SVignesh Raman for (i = 0; i < MAX_DMA_CHANNELS; i++) { 2158c12fe497SVignesh Raman struct sdma_channel *sdmac = &sdma->channel[i]; 2159c12fe497SVignesh Raman 216057b772b8SRobin Gong tasklet_kill(&sdmac->vc.task); 216157b772b8SRobin Gong sdma_free_chan_resources(&sdmac->vc.chan); 2162c12fe497SVignesh Raman } 216323e11811SVignesh Raman 216423e11811SVignesh Raman platform_set_drvdata(pdev, NULL); 216523e11811SVignesh Raman return 0; 21661ec1e82fSSascha Hauer } 21671ec1e82fSSascha Hauer 21681ec1e82fSSascha Hauer static struct platform_driver sdma_driver = { 21691ec1e82fSSascha Hauer .driver = { 21701ec1e82fSSascha Hauer .name = "imx-sdma", 2171580975d7SShawn Guo .of_match_table = sdma_dt_ids, 21721ec1e82fSSascha Hauer }, 217362550cd7SShawn Guo .id_table = sdma_devtypes, 21741d1bbd30SMaxin B. 
John .remove = sdma_remove, 217523e11811SVignesh Raman .probe = sdma_probe, 21761ec1e82fSSascha Hauer }; 21771ec1e82fSSascha Hauer 217823e11811SVignesh Raman module_platform_driver(sdma_driver); 21791ec1e82fSSascha Hauer 21801ec1e82fSSascha Hauer MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); 21811ec1e82fSSascha Hauer MODULE_DESCRIPTION("i.MX SDMA driver"); 2182c0879342SNicolas Chauvet #if IS_ENABLED(CONFIG_SOC_IMX6Q) 2183c0879342SNicolas Chauvet MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin"); 2184c0879342SNicolas Chauvet #endif 2185c0879342SNicolas Chauvet #if IS_ENABLED(CONFIG_SOC_IMX7D) 2186c0879342SNicolas Chauvet MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin"); 2187c0879342SNicolas Chauvet #endif 21881ec1e82fSSascha Hauer MODULE_LICENSE("GPL"); 2189
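/*
 * A minimal sketch of how a client typically drives this engine through the
 * generic dmaengine API exposed above (slave config, cyclic prep, issue
 * pending). It is not part of the driver; "rx", fifo_phys, buf_dma, buf_len,
 * period_len and rx_complete() are placeholders for the client's own
 * resources.
 *
 *	struct dma_slave_config cfg = { };
 *	struct dma_async_tx_descriptor *desc;
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	cfg.direction = DMA_DEV_TO_MEM;
 *	cfg.src_addr = fifo_phys;
 *	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *	cfg.src_maxburst = 6;
 *	dmaengine_slave_config(chan, &cfg);
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = rx_complete;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */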