// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-sdma.c
//
// This file contains a driver for the Freescale Smart DMA engine
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
//
// Based on code from Freescale:
//
// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.

#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/workqueue.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* SDMA registers */
#define SDMA_H_C0PTR		0x000
#define SDMA_H_INTR		0x004
#define SDMA_H_STATSTOP		0x008
#define SDMA_H_START		0x00c
#define SDMA_H_EVTOVR		0x010
#define SDMA_H_DSPOVR		0x014
#define SDMA_H_HOSTOVR		0x018
#define SDMA_H_EVTPEND		0x01c
#define SDMA_H_DSPENBL		0x020
#define SDMA_H_RESET		0x024
#define SDMA_H_EVTERR		0x028
#define SDMA_H_INTRMSK		0x02c
#define SDMA_H_PSW		0x030
#define SDMA_H_EVTERRDBG	0x034
#define SDMA_H_CONFIG		0x038
#define SDMA_ONCE_ENB		0x040
#define SDMA_ONCE_DATA		0x044
#define SDMA_ONCE_INSTR		0x048
#define SDMA_ONCE_STAT		0x04c
#define SDMA_ONCE_CMD		0x050
#define SDMA_EVT_MIRROR		0x054
#define SDMA_ILLINSTADDR	0x058
#define SDMA_CHN0ADDR		0x05c
#define SDMA_ONCE_RTB		0x060
#define SDMA_XTRIG_CONF1	0x070
#define SDMA_XTRIG_CONF2	0x074
#define SDMA_CHNENBL0_IMX35	0x200
#define SDMA_CHNENBL0_IMX31	0x080
#define SDMA_CHNPRI_0		0x100

/*
 * Buffer descriptor status values.
 */
#define BD_DONE  0x01
#define BD_WRAP  0x02
#define BD_CONT  0x04
#define BD_INTR  0x08
#define BD_RROR  0x10
#define BD_LAST  0x20
#define BD_EXTD  0x80

/*
 * Data Node descriptor status values.
 */
#define DND_END_OF_FRAME  0x80
#define DND_END_OF_XFER	0x40
#define DND_DONE  0x20
#define DND_UNUSED 0x01

/*
 * IPCV2 descriptor status values.
 */
#define BD_IPCV2_END_OF_FRAME  0x40

#define IPCV2_MAX_NODES        50
/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error
 */
#define DATA_ERROR  0x10000000

/*
 * Buffer descriptor commands.
 */
#define C0_ADDR             0x01
#define C0_LOAD             0x02
#define C0_DUMP             0x03
#define C0_SETCTX           0x07
#define C0_GETCTX           0x03
#define C0_SETDM            0x01
#define C0_SETPM            0x04
#define C0_GETDM            0x02
#define C0_GETPM            0x08
/*
 * Change endianness indicator in the BD command field
 */
#define CHANGE_ENDIANNESS   0x80

/*
 *  p_2_p watermark_level description
 *	Bits		Name			Description
 *	0-7		Lower WML		Lower watermark level
 *	8		PS			1: Pad Swallowing
 *						0: No Pad Swallowing
 *	9		PA			1: Pad Adding
 *						0: No Pad Adding
 *	10		SPDIF			If this bit is set both source
 *						and destination are on SPBA
 *	11		Source Bit(SP)		1: Source on SPBA
 *						0: Source on AIPS
 *	12		Destination Bit(DP)	1: Destination on SPBA
 *						0: Destination on AIPS
 *	13-15		---------		MUST BE 0
 *	16-23		Higher WML		HWML
 *	24-27		N			Total number of samples after
 *						which Pad adding/Swallowing
 *						must be done. It must be odd.
 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 *						LWML event mask
 *						0: LWE in EVENTS register
 *						1: LWE in EVENTS2 register
 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 *						HWML event mask
 *						0: HWE in EVENTS register
 *						1: HWE in EVENTS2 register
 *	30		---------		MUST BE 0
 *	31		CONT			1: Amount of samples to be
 *						transferred is unknown and
 *						script will keep on
 *						transferring samples as long as
 *						both events are detected and
 *						script must be manually stopped
 *						by the application
 *						0: The amount of samples to be
 *						transferred is equal to the
 *						count field of mode word
 */
#define SDMA_WATERMARK_LEVEL_LWML	0xFF
#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
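
/*
 * Illustrative example (values assumed, not taken from a reference manual):
 * for a peripheral-to-peripheral transfer with a lower watermark of 4 and a
 * higher watermark of 8, sdma_set_watermarklevel_for_p2p() below ends up
 * composing gReg[7] roughly as
 *
 *	(4 & SDMA_WATERMARK_LEVEL_LWML) | (8 << 16) |
 *	SDMA_WATERMARK_LEVEL_SP | SDMA_WATERMARK_LEVEL_DP |
 *	SDMA_WATERMARK_LEVEL_CONT
 *
 * where the SP/DP and LWE/HWE bits depend on where the peripherals and their
 * DMA events actually live (SPBA vs. AIPS, EVENTS vs. EVENTS2).
 */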

#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
				 BIT(DMA_MEM_TO_DEV) | \
				 BIT(DMA_DEV_TO_DEV))

/*
 * Mode/Count of data node descriptors - IPCv2
 */
struct sdma_mode_count {
#define SDMA_BD_MAX_CNT	0xffff
	u32 count   : 16; /* size of the buffer pointed by this BD */
	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
	u32 command :  8; /* command mostly used for channel 0 */
};

/*
 * Buffer descriptor
 */
struct sdma_buffer_descriptor {
	struct sdma_mode_count  mode;
	u32 buffer_addr;	/* address of the buffer described */
	u32 ext_buffer_addr;	/* extended buffer address */
} __attribute__ ((packed));

/**
 * struct sdma_channel_control - Channel control Block
 *
 * @current_bd_ptr:	current buffer descriptor processed
 * @base_bd_ptr:	first element of buffer descriptor array
 * @unused:		padding. The SDMA engine expects an array of 128 byte
 *			control blocks
 */
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));

/**
 * struct sdma_state_registers - SDMA context for a channel
 *
 * @pc:		program counter
 * @unused1:	unused
 * @t:		test bit: status of arithmetic & test instruction
 * @rpc:	return program counter
 * @unused0:	unused
 * @sf:		source fault while loading data
 * @spc:	loop start program counter
 * @unused2:	unused
 * @df:		destination fault while storing data
 * @epc:	loop end program counter
 * @lm:		loop mode
 */
struct sdma_state_registers {
	u32 pc     :14;
	u32 unused1: 1;
	u32 t      : 1;
	u32 rpc    :14;
	u32 unused0: 1;
	u32 sf     : 1;
	u32 spc    :14;
	u32 unused2: 1;
	u32 df     : 1;
	u32 epc    :14;
	u32 lm     : 2;
} __attribute__ ((packed));

/**
 * struct sdma_context_data - sdma context specific to a channel
 *
 * @channel_state:	channel state bits
 * @gReg:		general registers
 * @mda:		burst dma destination address register
 * @msa:		burst dma source address register
 * @ms:			burst dma status register
 * @md:			burst dma data register
 * @pda:		peripheral dma destination address register
 * @psa:		peripheral dma source address register
 * @ps:			peripheral dma status register
 * @pd:			peripheral dma data register
 * @ca:			CRC polynomial register
 * @cs:			CRC accumulator register
 * @dda:		dedicated core destination address register
 * @dsa:		dedicated core source address register
 * @ds:			dedicated core status register
 * @dd:			dedicated core data register
 * @scratch0:		1st word of dedicated ram for context switch
 * @scratch1:		2nd word of dedicated ram for context switch
 * @scratch2:		3rd word of dedicated ram for context switch
 * @scratch3:		4th word of dedicated ram for context switch
 * @scratch4:		5th word of dedicated ram for context switch
 * @scratch5:		6th word of dedicated ram for context switch
 * @scratch6:		7th word of dedicated ram for context switch
 * @scratch7:		8th word of dedicated ram for context switch
 */
struct sdma_context_data {
	struct sdma_state_registers  channel_state;
	u32  gReg[8];
	u32  mda;
	u32  msa;
	u32  ms;
	u32  md;
	u32  pda;
	u32  psa;
	u32  ps;
	u32  pd;
	u32  ca;
	u32  cs;
	u32  dda;
	u32  dsa;
	u32  ds;
	u32  dd;
	u32  scratch0;
	u32  scratch1;
	u32  scratch2;
	u32  scratch3;
	u32  scratch4;
	u32  scratch5;
	u32  scratch6;
	u32  scratch7;
} __attribute__ ((packed));


struct sdma_engine;

/**
 * struct sdma_desc - descriptor structure for one transfer
 * @vd:			descriptor for virt dma
 * @num_bd:		number of buffer descriptors currently being handled
 * @bd_phys:		physical address of bd
 * @buf_tail:		ID of the buffer that was processed
 * @buf_ptail:		ID of the previous buffer that was processed
 * @period_len:		period length, used in cyclic.
 * @chn_real_count:	the real count updated from bd->mode.count
 * @chn_count:		the transfer count set
 * @sdmac:		sdma_channel pointer
 * @bd:			pointer to the allocated buffer descriptors
 */
struct sdma_desc {
	struct virt_dma_desc	vd;
	unsigned int		num_bd;
	dma_addr_t		bd_phys;
	unsigned int		buf_tail;
	unsigned int		buf_ptail;
	unsigned int		period_len;
	unsigned int		chn_real_count;
	unsigned int		chn_count;
	struct sdma_channel	*sdmac;
	struct sdma_buffer_descriptor *bd;
};

/**
 * struct sdma_channel - housekeeping for a SDMA channel
 *
 * @vc:			virt_dma base structure
 * @desc:		sdma description including vd and other special members
 * @sdma:		pointer to the SDMA engine for this channel
 * @channel:		the channel number, matches dmaengine chan_id + 1
 * @direction:		transfer type. Needed for setting SDMA script
 * @slave_config:	Slave configuration
 * @peripheral_type:	Peripheral type. Needed for setting SDMA script
 * @event_id0:		aka dma request line
 * @event_id1:		for channels that use 2 events
 * @word_size:		peripheral access size
 * @pc_from_device:	script address for those device_2_memory
 * @pc_to_device:	script address for those memory_2_device
 * @device_to_device:	script address for those device_2_device
 * @pc_to_pc:		script address for those memory_2_memory
 * @flags:		loop mode or not
 * @per_address:	peripheral source or destination address in common case
 *			destination address in p_2_p case
 * @per_address2:	peripheral source address in p_2_p case
 * @event_mask:		event mask used in p_2_p script
 * @watermark_level:	value for gReg[7], some scripts extend it beyond the
 *			basic watermark, e.g. for p_2_p
 * @shp_addr:		value for gReg[6]
 * @per_addr:		value for gReg[2]
 * @status:		status of dma channel
 * @data:		specific sdma interface structure
 * @bd_pool:		dma_pool for bd
 */
struct sdma_channel {
	struct virt_dma_chan		vc;
	struct sdma_desc		*desc;
	struct sdma_engine		*sdma;
	unsigned int			channel;
	enum dma_transfer_direction	direction;
	struct dma_slave_config		slave_config;
	enum sdma_peripheral_type	peripheral_type;
	unsigned int			event_id0;
	unsigned int			event_id1;
	enum dma_slave_buswidth		word_size;
	unsigned int			pc_from_device, pc_to_device;
	unsigned int			device_to_device;
	unsigned int			pc_to_pc;
	unsigned long			flags;
	dma_addr_t			per_address, per_address2;
	unsigned long			event_mask[2];
	unsigned long			watermark_level;
	u32				shp_addr, per_addr;
	enum dma_status			status;
	bool				context_loaded;
	struct imx_dma_data		data;
	struct work_struct		terminate_worker;
};

#define IMX_DMA_SG_LOOP		BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

#define SDMA_FIRMWARE_MAGIC 0x414d4453

/**
 * struct sdma_firmware_header - Layout of the firmware image
 *
 * @magic:		"SDMA"
 * @version_major:	increased whenever layout of struct
 *			sdma_script_start_addrs changes.
 * @version_minor:	firmware minor version (for binary compatible changes)
 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs:	Number of script addresses in this image
 * @ram_code_start:	offset of SDMA ram image in this firmware image
 * @ram_code_size:	size of SDMA ram image
 * @script_addrs:	Stores the start address of the SDMA scripts
 *			(in SDMA memory space)
 */
struct sdma_firmware_header {
	u32	magic;
	u32	version_major;
	u32	version_minor;
	u32	script_addrs_start;
	u32	num_script_addrs;
	u32	ram_code_start;
	u32	ram_code_size;
};

struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs	*script_addrs;
	bool check_ratio;
};

struct sdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct sdma_channel		channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control	*channel_control;
	void __iomem			*regs;
	struct sdma_context_data	*context;
	dma_addr_t			context_phys;
	struct dma_device		dma_device;
	struct clk			*clk_ipg;
	struct clk			*clk_ahb;
	spinlock_t			channel_0_lock;
	u32				script_number;
	struct sdma_script_start_addrs	*script_addrs;
	const struct sdma_driver_data	*drvdata;
	u32				spba_start_addr;
	u32				spba_end_addr;
	unsigned int			irq;
	dma_addr_t			bd0_phys;
	struct sdma_buffer_descriptor	*bd0;
	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
	bool				clk_ratio;
};

static int sdma_config_write(struct dma_chan *chan,
		       struct dma_slave_config *dmaengine_cfg,
		       enum dma_transfer_direction direction);

static struct sdma_driver_data sdma_imx31 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX31,
	.num_events = 32,
};

static struct sdma_script_start_addrs sdma_script_imx25 = {
	.ap_2_ap_addr = 729,
	.uart_2_mcu_addr = 904,
	.per_2_app_addr = 1255,
	.mcu_2_app_addr = 834,
	.uartsh_2_mcu_addr = 1120,
	.per_2_shp_addr = 1329,
	.mcu_2_shp_addr = 1048,
	.ata_2_mcu_addr = 1560,
	.mcu_2_ata_addr = 1479,
	.app_2_per_addr = 1189,
	.app_2_mcu_addr = 770,
	.shp_2_per_addr = 1407,
	.shp_2_mcu_addr = 979,
};

static struct sdma_driver_data sdma_imx25 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx25,
};

static struct sdma_driver_data sdma_imx35 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
};

static struct sdma_script_start_addrs sdma_script_imx51 = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_driver_data sdma_imx51 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx51,
};

static struct sdma_script_start_addrs sdma_script_imx53 = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_driver_data sdma_imx53 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx53,
};

static struct sdma_script_start_addrs sdma_script_imx6q = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.per_2_per_addr = 6331,
	.uartsh_2_mcu_addr = 1032,
	.mcu_2_shp_addr = 960,
	.app_2_mcu_addr = 683,
	.shp_2_mcu_addr = 891,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
};

static struct sdma_driver_data sdma_imx6q = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx6q,
};

static struct sdma_script_start_addrs sdma_script_imx7d = {
	.ap_2_ap_addr = 644,
	.uart_2_mcu_addr = 819,
	.mcu_2_app_addr = 749,
	.uartsh_2_mcu_addr = 1034,
	.mcu_2_shp_addr = 962,
	.app_2_mcu_addr = 685,
	.shp_2_mcu_addr = 893,
	.spdif_2_mcu_addr = 1102,
	.mcu_2_spdif_addr = 1136,
};

static struct sdma_driver_data sdma_imx7d = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
};

static struct sdma_driver_data sdma_imx8mq = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
	.check_ratio = 1,
};

static const struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx25-sdma",
		.driver_data = (unsigned long)&sdma_imx25,
	}, {
		.name = "imx31-sdma",
		.driver_data = (unsigned long)&sdma_imx31,
	}, {
		.name = "imx35-sdma",
		.driver_data = (unsigned long)&sdma_imx35,
	}, {
		.name = "imx51-sdma",
		.driver_data = (unsigned long)&sdma_imx51,
	}, {
		.name = "imx53-sdma",
		.driver_data = (unsigned long)&sdma_imx53,
	}, {
		.name = "imx6q-sdma",
		.driver_data = (unsigned long)&sdma_imx6q,
	}, {
		.name = "imx7d-sdma",
		.driver_data = (unsigned long)&sdma_imx7d,
	}, {
		.name = "imx8mq-sdma",
"imx8mq-sdma", 592941acd56SAngus Ainslie (Purism) .driver_data = (unsigned long)&sdma_imx8mq, 593941acd56SAngus Ainslie (Purism) }, { 59462550cd7SShawn Guo /* sentinel */ 59562550cd7SShawn Guo } 59662550cd7SShawn Guo }; 59762550cd7SShawn Guo MODULE_DEVICE_TABLE(platform, sdma_devtypes); 59862550cd7SShawn Guo 599580975d7SShawn Guo static const struct of_device_id sdma_dt_ids[] = { 600dcfec3c0SSascha Hauer { .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, }, 601dcfec3c0SSascha Hauer { .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, }, 602dcfec3c0SSascha Hauer { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, }, 60317bba72fSSascha Hauer { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, }, 604dcfec3c0SSascha Hauer { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, }, 60563edea16SMarkus Pargmann { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, }, 606b7d2648aSFabio Estevam { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, }, 607941acd56SAngus Ainslie (Purism) { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, }, 608580975d7SShawn Guo { /* sentinel */ } 609580975d7SShawn Guo }; 610580975d7SShawn Guo MODULE_DEVICE_TABLE(of, sdma_dt_ids); 611580975d7SShawn Guo 6120bbc1413SRichard Zhao #define SDMA_H_CONFIG_DSPDMA BIT(12) /* indicates if the DSPDMA is used */ 6130bbc1413SRichard Zhao #define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */ 6140bbc1413SRichard Zhao #define SDMA_H_CONFIG_ACR BIT(4) /* indicates if AHB freq /core freq = 2 or 1 */ 6151ec1e82fSSascha Hauer #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ 6161ec1e82fSSascha Hauer 6171ec1e82fSSascha Hauer static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) 6181ec1e82fSSascha Hauer { 61917bba72fSSascha Hauer u32 chnenbl0 = sdma->drvdata->chnenbl0; 6201ec1e82fSSascha Hauer return chnenbl0 + event * 4; 6211ec1e82fSSascha Hauer } 6221ec1e82fSSascha Hauer 6231ec1e82fSSascha Hauer static int sdma_config_ownership(struct sdma_channel *sdmac, 6241ec1e82fSSascha Hauer bool event_override, bool mcu_override, bool dsp_override) 6251ec1e82fSSascha Hauer { 6261ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 6271ec1e82fSSascha Hauer int channel = sdmac->channel; 6280bbc1413SRichard Zhao unsigned long evt, mcu, dsp; 6291ec1e82fSSascha Hauer 6301ec1e82fSSascha Hauer if (event_override && mcu_override && dsp_override) 6311ec1e82fSSascha Hauer return -EINVAL; 6321ec1e82fSSascha Hauer 633c4b56857SRichard Zhao evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR); 634c4b56857SRichard Zhao mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR); 635c4b56857SRichard Zhao dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR); 6361ec1e82fSSascha Hauer 6371ec1e82fSSascha Hauer if (dsp_override) 6380bbc1413SRichard Zhao __clear_bit(channel, &dsp); 6391ec1e82fSSascha Hauer else 6400bbc1413SRichard Zhao __set_bit(channel, &dsp); 6411ec1e82fSSascha Hauer 6421ec1e82fSSascha Hauer if (event_override) 6430bbc1413SRichard Zhao __clear_bit(channel, &evt); 6441ec1e82fSSascha Hauer else 6450bbc1413SRichard Zhao __set_bit(channel, &evt); 6461ec1e82fSSascha Hauer 6471ec1e82fSSascha Hauer if (mcu_override) 6480bbc1413SRichard Zhao __clear_bit(channel, &mcu); 6491ec1e82fSSascha Hauer else 6500bbc1413SRichard Zhao __set_bit(channel, &mcu); 6511ec1e82fSSascha Hauer 652c4b56857SRichard Zhao writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR); 653c4b56857SRichard Zhao writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR); 654c4b56857SRichard Zhao writel_relaxed(dsp, 
static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
	writel(BIT(channel), sdma->regs + SDMA_H_START);
}

/*
 * sdma_run_channel0 - run a channel and wait till it's done
 */
static int sdma_run_channel0(struct sdma_engine *sdma)
{
	int ret;
	u32 reg;

	sdma_enable_channel(sdma, 0);

	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
						reg, !(reg & 1), 1, 500);
	if (ret)
		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");

	/* Set bits of CONFIG register with dynamic context switching */
	reg = readl(sdma->regs + SDMA_H_CONFIG);
	if ((reg & SDMA_H_CONFIG_CSM) == 0) {
		reg |= SDMA_H_CONFIG_CSM;
		writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
	}

	return ret;
}

static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
		u32 address)
{
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	void *buf_virt;
	dma_addr_t buf_phys;
	int ret;
	unsigned long flags;

	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
	if (!buf_virt)
		return -ENOMEM;

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
	bd0->mode.count = size / 2;
	bd0->buffer_addr = buf_phys;
	bd0->ext_buffer_addr = address;

	memcpy(buf_virt, buf, size);

	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);

	return ret;
}
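
/*
 * Route a DMA request (event) to this channel: each event has its own
 * channel-enable register (see chnenbl_ofs()) in which bit 'channel'
 * selects the channels triggered by that event.
 */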
static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long val;
	u32 chnenbl = chnenbl_ofs(sdma, event);

	val = readl_relaxed(sdma->regs + chnenbl);
	__set_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	u32 chnenbl = chnenbl_ofs(sdma, event);
	unsigned long val;

	val = readl_relaxed(sdma->regs + chnenbl);
	__clear_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct sdma_desc, vd.tx);
}

static void sdma_start_desc(struct sdma_channel *sdmac)
{
	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
	struct sdma_desc *desc;
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (!vd) {
		sdmac->desc = NULL;
		return;
	}
	sdmac->desc = desc = to_sdma_desc(&vd->tx);
	/*
	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
	 * the desc allocated will never be freed in vchan_dma_desc_free_list
	 */
	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
		list_del(&vd->node);

	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
	sdma_enable_channel(sdma, sdmac->channel);
}

static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int error = 0;
	enum dma_status	old_status = sdmac->status;

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (sdmac->desc) {
		struct sdma_desc *desc = sdmac->desc;

		bd = &desc->bd[desc->buf_tail];

		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR) {
			bd->mode.status &= ~BD_RROR;
			sdmac->status = DMA_ERROR;
			error = -EIO;
		}

		/*
		 * We use bd->mode.count to calculate the residue, since it
		 * contains the number of bytes present in the current buffer
		 * descriptor.
		 */
		desc->chn_real_count = bd->mode.count;
		bd->mode.status |= BD_DONE;
		bd->mode.count = desc->period_len;
		desc->buf_ptail = desc->buf_tail;
		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;

		/*
		 * The callback is called from the interrupt context in order
		 * to reduce latency and to avoid the risk of altering the
		 * SDMA transaction status by the time the client tasklet is
		 * executed.
		 */
		spin_unlock(&sdmac->vc.lock);
		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
		spin_lock(&sdmac->vc.lock);

		if (error)
			sdmac->status = old_status;
	}
}
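
/*
 * Completion handling for non-cyclic transfers: walk all buffer descriptors
 * of the finished descriptor, accumulate the number of bytes actually
 * transferred and report an error if any descriptor still carries the DONE
 * or RROR status bits.
 */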
static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
{
	struct sdma_channel *sdmac = (struct sdma_channel *) data;
	struct sdma_buffer_descriptor *bd;
	int i, error = 0;

	sdmac->desc->chn_real_count = 0;
	/*
	 * non loop mode. Iterate over all descriptors, collect
	 * errors and call callback function
	 */
	for (i = 0; i < sdmac->desc->num_bd; i++) {
		bd = &sdmac->desc->bd[i];

		if (bd->mode.status & (BD_DONE | BD_RROR))
			error = -EIO;
		sdmac->desc->chn_real_count += bd->mode.count;
	}

	if (error)
		sdmac->status = DMA_ERROR;
	else
		sdmac->status = DMA_COMPLETE;
}

static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	unsigned long stat;

	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
	/* channel 0 is special and not handled here, see run_channel0() */
	stat &= ~1;

	while (stat) {
		int channel = fls(stat) - 1;
		struct sdma_channel *sdmac = &sdma->channel[channel];
		struct sdma_desc *desc;

		spin_lock(&sdmac->vc.lock);
		desc = sdmac->desc;
		if (desc) {
			if (sdmac->flags & IMX_DMA_SG_LOOP) {
				sdma_update_channel_loop(sdmac);
			} else {
				mxc_sdma_handle_channel_normal(sdmac);
				vchan_cookie_complete(&desc->vd);
				sdma_start_desc(sdmac);
			}
		}

		spin_unlock(&sdmac->vc.lock);
		__clear_bit(channel, &stat);
	}

	return IRQ_HANDLED;
}

/*
 * sets the pc of SDMA script according to the peripheral type
 */
static void sdma_get_pc(struct sdma_channel *sdmac,
		enum sdma_peripheral_type peripheral_type)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int per_2_emi = 0, emi_2_per = 0;
	/*
	 * These are needed once we start to support transfers between
	 * two peripherals or memory-to-memory transfers
	 */
	int per_2_per = 0, emi_2_emi = 0;

	sdmac->pc_from_device = 0;
	sdmac->pc_to_device = 0;
	sdmac->device_to_device = 0;
	sdmac->pc_to_pc = 0;

	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:
		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
		break;
	case IMX_DMATYPE_DSP:
		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
		break;
	case IMX_DMATYPE_FIRI:
		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
		break;
	case IMX_DMATYPE_UART:
		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_UART_SP:
		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ATA:
		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
	case IMX_DMATYPE_SAI:
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_SSI_DUAL:
		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
	case IMX_DMATYPE_SDHC:
	case IMX_DMATYPE_CSPI_SP:
	case IMX_DMATYPE_ESAI:
	case IMX_DMATYPE_MSHC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ASRC:
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_ASRC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_MSHC:
		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
		break;
	case IMX_DMATYPE_CCM:
		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
		break;
	case IMX_DMATYPE_SPDIF:
		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
		break;
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
	default:
		break;
	}

	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
	sdmac->device_to_device = per_2_per;
	sdmac->pc_to_pc = emi_2_emi;
}
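
/*
 * Load the channel context into SDMA RAM: fill struct sdma_context_data with
 * the script entry point, event masks, peripheral addresses and watermark
 * level, then copy it to the per-channel context area through a C0_SETDM
 * command issued on channel 0.
 */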
static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	int ret;
	unsigned long flags;

	if (sdmac->context_loaded)
		return 0;

	if (sdmac->direction == DMA_DEV_TO_MEM)
		load_address = sdmac->pc_from_device;
	else if (sdmac->direction == DMA_DEV_TO_DEV)
		load_address = sdmac->device_to_device;
	else if (sdmac->direction == DMA_MEM_TO_MEM)
		load_address = sdmac->pc_to_pc;
	else
		load_address = sdmac->pc_to_device;

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/* Send by context the event mask, base address for peripheral
	 * and watermark level
	 */
	context->gReg[0] = sdmac->event_mask[1];
	context->gReg[1] = sdmac->event_mask[0];
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	sdmac->context_loaded = true;

	return ret;
}

static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sdma_channel, vc.chan);
}

static int sdma_disable_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
	sdmac->status = DMA_ERROR;

	return 0;
}

static void sdma_channel_terminate_work(struct work_struct *work)
{
	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
						  terminate_worker);
	unsigned long flags;
	LIST_HEAD(head);

	/*
	 * According to NXP R&D team a delay of one BD SDMA cost time
	 * (maximum is 1ms) should be added after disable of the channel
	 * bit, to ensure SDMA core has really been stopped after SDMA
	 * clients call .device_terminate_all.
	 */
	usleep_range(1000, 2000);

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	vchan_get_all_descriptors(&sdmac->vc, &head);
	sdmac->desc = NULL;
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
	vchan_dma_desc_free_list(&sdmac->vc, &head);
	sdmac->context_loaded = false;
}

static int sdma_terminate_all(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	sdma_disable_channel(chan);

	if (sdmac->desc)
		schedule_work(&sdmac->terminate_worker);

	return 0;
}

static void sdma_channel_synchronize(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	vchan_synchronize(&sdmac->vc);

	flush_work(&sdmac->terminate_worker);
}
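
/*
 * Compose the peripheral-to-peripheral watermark word (gReg[7]) according to
 * the bit layout documented above the SDMA_WATERMARK_LEVEL_* definitions:
 * lower/higher watermark levels, the EVENTS/EVENTS2 selection bits and the
 * SPBA source/destination bits.
 */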
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;

	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;

	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);

	if (sdmac->event_id0 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;

	if (sdmac->event_id1 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;

	/*
	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to swap
	 * LWML and HWML of INFO(A.3.2.5.1), and also swap
	 * r0(event_mask[1]) and r1(event_mask[0]).
	 */
	if (lwml > hwml) {
		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
						SDMA_WATERMARK_LEVEL_HWML);
		sdmac->watermark_level |= hwml;
		sdmac->watermark_level |= lwml << 16;
		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
	}

	if (sdmac->per_address2 >= sdma->spba_start_addr &&
			sdmac->per_address2 <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;

	if (sdmac->per_address >= sdma->spba_start_addr &&
			sdmac->per_address <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;

	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
}

static int sdma_config_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	int ret;

	sdma_disable_channel(chan);

	sdmac->event_mask[0] = 0;
	sdmac->event_mask[1] = 0;
	sdmac->shp_addr = 0;
	sdmac->per_addr = 0;

	switch (sdmac->peripheral_type) {
	case IMX_DMATYPE_DSP:
		sdma_config_ownership(sdmac, false, true, true);
		break;
	case IMX_DMATYPE_MEMORY:
		sdma_config_ownership(sdmac, false, true, false);
		break;
	default:
		sdma_config_ownership(sdmac, true, true, false);
		break;
	}

	sdma_get_pc(sdmac, sdmac->peripheral_type);

	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
		/* Handle multiple event channels differently */
		if (sdmac->event_id1) {
			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
IMX_DMATYPE_ASRC) 11738391ecf4SShengjiu Wang sdma_set_watermarklevel_for_p2p(sdmac); 11748391ecf4SShengjiu Wang } else 11750bbc1413SRichard Zhao __set_bit(sdmac->event_id0, sdmac->event_mask); 11768391ecf4SShengjiu Wang 11771ec1e82fSSascha Hauer /* Address */ 11781ec1e82fSSascha Hauer sdmac->shp_addr = sdmac->per_address; 11798391ecf4SShengjiu Wang sdmac->per_addr = sdmac->per_address2; 11801ec1e82fSSascha Hauer } else { 11811ec1e82fSSascha Hauer sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */ 11821ec1e82fSSascha Hauer } 11831ec1e82fSSascha Hauer 11841ec1e82fSSascha Hauer ret = sdma_load_context(sdmac); 11851ec1e82fSSascha Hauer 11861ec1e82fSSascha Hauer return ret; 11871ec1e82fSSascha Hauer } 11881ec1e82fSSascha Hauer 11891ec1e82fSSascha Hauer static int sdma_set_channel_priority(struct sdma_channel *sdmac, 11901ec1e82fSSascha Hauer unsigned int priority) 11911ec1e82fSSascha Hauer { 11921ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 11931ec1e82fSSascha Hauer int channel = sdmac->channel; 11941ec1e82fSSascha Hauer 11951ec1e82fSSascha Hauer if (priority < MXC_SDMA_MIN_PRIORITY 11961ec1e82fSSascha Hauer || priority > MXC_SDMA_MAX_PRIORITY) { 11971ec1e82fSSascha Hauer return -EINVAL; 11981ec1e82fSSascha Hauer } 11991ec1e82fSSascha Hauer 1200c4b56857SRichard Zhao writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); 12011ec1e82fSSascha Hauer 12021ec1e82fSSascha Hauer return 0; 12031ec1e82fSSascha Hauer } 12041ec1e82fSSascha Hauer 120557b772b8SRobin Gong static int sdma_request_channel0(struct sdma_engine *sdma) 12061ec1e82fSSascha Hauer { 12071ec1e82fSSascha Hauer int ret = -EBUSY; 12081ec1e82fSSascha Hauer 120931ef489aSLinus Torvalds sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys, 121057b772b8SRobin Gong GFP_NOWAIT); 121157b772b8SRobin Gong if (!sdma->bd0) { 12121ec1e82fSSascha Hauer ret = -ENOMEM; 12131ec1e82fSSascha Hauer goto out; 12141ec1e82fSSascha Hauer } 12151ec1e82fSSascha Hauer 121657b772b8SRobin Gong sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys; 121757b772b8SRobin Gong sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys; 12181ec1e82fSSascha Hauer 121957b772b8SRobin Gong sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY); 12201ec1e82fSSascha Hauer return 0; 12211ec1e82fSSascha Hauer out: 12221ec1e82fSSascha Hauer 12231ec1e82fSSascha Hauer return ret; 12241ec1e82fSSascha Hauer } 12251ec1e82fSSascha Hauer 122657b772b8SRobin Gong 122757b772b8SRobin Gong static int sdma_alloc_bd(struct sdma_desc *desc) 12281ec1e82fSSascha Hauer { 1229ebb853b1SLucas Stach u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); 123057b772b8SRobin Gong int ret = 0; 12311ec1e82fSSascha Hauer 123231ef489aSLinus Torvalds desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size, 1233ceaf5226SAndy Duan &desc->bd_phys, GFP_NOWAIT); 123457b772b8SRobin Gong if (!desc->bd) { 123557b772b8SRobin Gong ret = -ENOMEM; 123657b772b8SRobin Gong goto out; 123757b772b8SRobin Gong } 123857b772b8SRobin Gong out: 123957b772b8SRobin Gong return ret; 124057b772b8SRobin Gong } 12411ec1e82fSSascha Hauer 124257b772b8SRobin Gong static void sdma_free_bd(struct sdma_desc *desc) 124357b772b8SRobin Gong { 1244ebb853b1SLucas Stach u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); 1245ebb853b1SLucas Stach 1246ceaf5226SAndy Duan dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, 1247ceaf5226SAndy Duan desc->bd_phys); 124857b772b8SRobin Gong } 12491ec1e82fSSascha Hauer 125057b772b8SRobin Gong static void 
sdma_desc_free(struct virt_dma_desc *vd) 125157b772b8SRobin Gong { 125257b772b8SRobin Gong struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd); 125357b772b8SRobin Gong 125457b772b8SRobin Gong sdma_free_bd(desc); 125557b772b8SRobin Gong kfree(desc); 12561ec1e82fSSascha Hauer } 12571ec1e82fSSascha Hauer 12581ec1e82fSSascha Hauer static int sdma_alloc_chan_resources(struct dma_chan *chan) 12591ec1e82fSSascha Hauer { 12601ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 12611ec1e82fSSascha Hauer struct imx_dma_data *data = chan->private; 12620f06c027SRobin Gong struct imx_dma_data mem_data; 12631ec1e82fSSascha Hauer int prio, ret; 12641ec1e82fSSascha Hauer 12650f06c027SRobin Gong /* 12660f06c027SRobin Gong * MEMCPY may never set up chan->private via a filter function such as 12670f06c027SRobin Gong * dmatest, thus create 'struct imx_dma_data mem_data' for this case. 12680f06c027SRobin Gong * Please note that in any other slave case you have to set up chan->private 12690f06c027SRobin Gong * with 'struct imx_dma_data' in your own filter function if you want to 12700f06c027SRobin Gong * request a dma channel by dma_request_channel() rather than 12710f06c027SRobin Gong * dma_request_slave_channel(). Otherwise, 'MEMCPY in case?' will appear 12720f06c027SRobin Gong * to warn you to correct your filter function. 12730f06c027SRobin Gong */ 12740f06c027SRobin Gong if (!data) { 12750f06c027SRobin Gong dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n"); 12760f06c027SRobin Gong mem_data.priority = 2; 12770f06c027SRobin Gong mem_data.peripheral_type = IMX_DMATYPE_MEMORY; 12780f06c027SRobin Gong mem_data.dma_request = 0; 12790f06c027SRobin Gong mem_data.dma_request2 = 0; 12800f06c027SRobin Gong data = &mem_data; 12810f06c027SRobin Gong 12820f06c027SRobin Gong sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY); 12830f06c027SRobin Gong } 12841ec1e82fSSascha Hauer 12851ec1e82fSSascha Hauer switch (data->priority) { 12861ec1e82fSSascha Hauer case DMA_PRIO_HIGH: 12871ec1e82fSSascha Hauer prio = 3; 12881ec1e82fSSascha Hauer break; 12891ec1e82fSSascha Hauer case DMA_PRIO_MEDIUM: 12901ec1e82fSSascha Hauer prio = 2; 12911ec1e82fSSascha Hauer break; 12921ec1e82fSSascha Hauer case DMA_PRIO_LOW: 12931ec1e82fSSascha Hauer default: 12941ec1e82fSSascha Hauer prio = 1; 12951ec1e82fSSascha Hauer break; 12961ec1e82fSSascha Hauer } 12971ec1e82fSSascha Hauer 12981ec1e82fSSascha Hauer sdmac->peripheral_type = data->peripheral_type; 12991ec1e82fSSascha Hauer sdmac->event_id0 = data->dma_request; 13008391ecf4SShengjiu Wang sdmac->event_id1 = data->dma_request2; 1301c2c744d3SRichard Zhao 1302b93edcddSFabio Estevam ret = clk_enable(sdmac->sdma->clk_ipg); 1303b93edcddSFabio Estevam if (ret) 1304b93edcddSFabio Estevam return ret; 1305b93edcddSFabio Estevam ret = clk_enable(sdmac->sdma->clk_ahb); 1306b93edcddSFabio Estevam if (ret) 1307b93edcddSFabio Estevam goto disable_clk_ipg; 1308c2c744d3SRichard Zhao 13093bb5e7caSRichard Zhao ret = sdma_set_channel_priority(sdmac, prio); 13101ec1e82fSSascha Hauer if (ret) 1311b93edcddSFabio Estevam goto disable_clk_ahb; 13121ec1e82fSSascha Hauer 13131ec1e82fSSascha Hauer return 0; 1314b93edcddSFabio Estevam 1315b93edcddSFabio Estevam disable_clk_ahb: 1316b93edcddSFabio Estevam clk_disable(sdmac->sdma->clk_ahb); 1317b93edcddSFabio Estevam disable_clk_ipg: 1318b93edcddSFabio Estevam clk_disable(sdmac->sdma->clk_ipg); 1319b93edcddSFabio Estevam return ret; 13201ec1e82fSSascha Hauer } 13211ec1e82fSSascha Hauer 13221ec1e82fSSascha Hauer static void sdma_free_chan_resources(struct dma_chan *chan)
13231ec1e82fSSascha Hauer { 13241ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 13251ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 13261ec1e82fSSascha Hauer 1327*a80f2787SSascha Hauer sdma_terminate_all(chan); 1328b8603d2aSLucas Stach 1329b8603d2aSLucas Stach sdma_channel_synchronize(chan); 13301ec1e82fSSascha Hauer 13311ec1e82fSSascha Hauer if (sdmac->event_id0) 13321ec1e82fSSascha Hauer sdma_event_disable(sdmac, sdmac->event_id0); 13331ec1e82fSSascha Hauer if (sdmac->event_id1) 13341ec1e82fSSascha Hauer sdma_event_disable(sdmac, sdmac->event_id1); 13351ec1e82fSSascha Hauer 13361ec1e82fSSascha Hauer sdmac->event_id0 = 0; 13371ec1e82fSSascha Hauer sdmac->event_id1 = 0; 13381ec1e82fSSascha Hauer 13391ec1e82fSSascha Hauer sdma_set_channel_priority(sdmac, 0); 13401ec1e82fSSascha Hauer 13417560e3f3SSascha Hauer clk_disable(sdma->clk_ipg); 13427560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 13431ec1e82fSSascha Hauer } 13441ec1e82fSSascha Hauer 134521420841SRobin Gong static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, 134621420841SRobin Gong enum dma_transfer_direction direction, u32 bds) 134721420841SRobin Gong { 134821420841SRobin Gong struct sdma_desc *desc; 134921420841SRobin Gong 135021420841SRobin Gong desc = kzalloc((sizeof(*desc)), GFP_NOWAIT); 135121420841SRobin Gong if (!desc) 135221420841SRobin Gong goto err_out; 135321420841SRobin Gong 135421420841SRobin Gong sdmac->status = DMA_IN_PROGRESS; 135521420841SRobin Gong sdmac->direction = direction; 135621420841SRobin Gong sdmac->flags = 0; 135721420841SRobin Gong 135821420841SRobin Gong desc->chn_count = 0; 135921420841SRobin Gong desc->chn_real_count = 0; 136021420841SRobin Gong desc->buf_tail = 0; 136121420841SRobin Gong desc->buf_ptail = 0; 136221420841SRobin Gong desc->sdmac = sdmac; 136321420841SRobin Gong desc->num_bd = bds; 136421420841SRobin Gong 136521420841SRobin Gong if (sdma_alloc_bd(desc)) 136621420841SRobin Gong goto err_desc_out; 136721420841SRobin Gong 13680f06c027SRobin Gong /* No slave_config called in MEMCPY case, so do here */ 13690f06c027SRobin Gong if (direction == DMA_MEM_TO_MEM) 13700f06c027SRobin Gong sdma_config_ownership(sdmac, false, true, false); 13710f06c027SRobin Gong 137221420841SRobin Gong if (sdma_load_context(sdmac)) 137321420841SRobin Gong goto err_desc_out; 137421420841SRobin Gong 137521420841SRobin Gong return desc; 137621420841SRobin Gong 137721420841SRobin Gong err_desc_out: 137821420841SRobin Gong kfree(desc); 137921420841SRobin Gong err_out: 138021420841SRobin Gong return NULL; 138121420841SRobin Gong } 138221420841SRobin Gong 13830f06c027SRobin Gong static struct dma_async_tx_descriptor *sdma_prep_memcpy( 13840f06c027SRobin Gong struct dma_chan *chan, dma_addr_t dma_dst, 13850f06c027SRobin Gong dma_addr_t dma_src, size_t len, unsigned long flags) 13860f06c027SRobin Gong { 13870f06c027SRobin Gong struct sdma_channel *sdmac = to_sdma_chan(chan); 13880f06c027SRobin Gong struct sdma_engine *sdma = sdmac->sdma; 13890f06c027SRobin Gong int channel = sdmac->channel; 13900f06c027SRobin Gong size_t count; 13910f06c027SRobin Gong int i = 0, param; 13920f06c027SRobin Gong struct sdma_buffer_descriptor *bd; 13930f06c027SRobin Gong struct sdma_desc *desc; 13940f06c027SRobin Gong 13950f06c027SRobin Gong if (!chan || !len) 13960f06c027SRobin Gong return NULL; 13970f06c027SRobin Gong 13980f06c027SRobin Gong dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n", 13990f06c027SRobin Gong &dma_src, &dma_dst, len, channel); 14000f06c027SRobin 
Gong 14010f06c027SRobin Gong desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM, 14020f06c027SRobin Gong len / SDMA_BD_MAX_CNT + 1); 14030f06c027SRobin Gong if (!desc) 14040f06c027SRobin Gong return NULL; 14050f06c027SRobin Gong 14060f06c027SRobin Gong do { 14070f06c027SRobin Gong count = min_t(size_t, len, SDMA_BD_MAX_CNT); 14080f06c027SRobin Gong bd = &desc->bd[i]; 14090f06c027SRobin Gong bd->buffer_addr = dma_src; 14100f06c027SRobin Gong bd->ext_buffer_addr = dma_dst; 14110f06c027SRobin Gong bd->mode.count = count; 14120f06c027SRobin Gong desc->chn_count += count; 14130f06c027SRobin Gong bd->mode.command = 0; 14140f06c027SRobin Gong 14150f06c027SRobin Gong dma_src += count; 14160f06c027SRobin Gong dma_dst += count; 14170f06c027SRobin Gong len -= count; 14180f06c027SRobin Gong i++; 14190f06c027SRobin Gong 14200f06c027SRobin Gong param = BD_DONE | BD_EXTD | BD_CONT; 14210f06c027SRobin Gong /* last bd */ 14220f06c027SRobin Gong if (!len) { 14230f06c027SRobin Gong param |= BD_INTR; 14240f06c027SRobin Gong param |= BD_LAST; 14250f06c027SRobin Gong param &= ~BD_CONT; 14260f06c027SRobin Gong } 14270f06c027SRobin Gong 14280f06c027SRobin Gong dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n", 14290f06c027SRobin Gong i, count, bd->buffer_addr, 14300f06c027SRobin Gong param & BD_WRAP ? "wrap" : "", 14310f06c027SRobin Gong param & BD_INTR ? " intr" : ""); 14320f06c027SRobin Gong 14330f06c027SRobin Gong bd->mode.status = param; 14340f06c027SRobin Gong } while (len); 14350f06c027SRobin Gong 14360f06c027SRobin Gong return vchan_tx_prep(&sdmac->vc, &desc->vd, flags); 14370f06c027SRobin Gong } 14380f06c027SRobin Gong 14391ec1e82fSSascha Hauer static struct dma_async_tx_descriptor *sdma_prep_slave_sg( 14401ec1e82fSSascha Hauer struct dma_chan *chan, struct scatterlist *sgl, 1441db8196dfSVinod Koul unsigned int sg_len, enum dma_transfer_direction direction, 1442185ecb5fSAlexandre Bounine unsigned long flags, void *context) 14431ec1e82fSSascha Hauer { 14441ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 14451ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 1446ad78b000SVinod Koul int i, count; 144723889c63SSascha Hauer int channel = sdmac->channel; 14481ec1e82fSSascha Hauer struct scatterlist *sg; 144957b772b8SRobin Gong struct sdma_desc *desc; 14501ec1e82fSSascha Hauer 1451107d0644SVinod Koul sdma_config_write(chan, &sdmac->slave_config, direction); 1452107d0644SVinod Koul 145321420841SRobin Gong desc = sdma_transfer_init(sdmac, direction, sg_len); 145457b772b8SRobin Gong if (!desc) 145557b772b8SRobin Gong goto err_out; 145657b772b8SRobin Gong 14571ec1e82fSSascha Hauer dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 14581ec1e82fSSascha Hauer sg_len, channel); 14591ec1e82fSSascha Hauer 14601ec1e82fSSascha Hauer for_each_sg(sgl, sg, sg_len, i) { 146176c33d27SSascha Hauer struct sdma_buffer_descriptor *bd = &desc->bd[i]; 14621ec1e82fSSascha Hauer int param; 14631ec1e82fSSascha Hauer 1464d2f5c276SAnatolij Gustschin bd->buffer_addr = sg->dma_address; 14651ec1e82fSSascha Hauer 1466fdaf9c4bSLars-Peter Clausen count = sg_dma_len(sg); 14671ec1e82fSSascha Hauer 14684a6b2e8aSRobin Gong if (count > SDMA_BD_MAX_CNT) { 14691ec1e82fSSascha Hauer dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", 14704a6b2e8aSRobin Gong channel, count, SDMA_BD_MAX_CNT); 147157b772b8SRobin Gong goto err_bd_out; 14721ec1e82fSSascha Hauer } 14731ec1e82fSSascha Hauer 14741ec1e82fSSascha Hauer bd->mode.count = count; 147576c33d27SSascha Hauer 
desc->chn_count += count; 14761ec1e82fSSascha Hauer 1477ad78b000SVinod Koul if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) 147857b772b8SRobin Gong goto err_bd_out; 14791fa81c27SSascha Hauer 14801fa81c27SSascha Hauer switch (sdmac->word_size) { 14811fa81c27SSascha Hauer case DMA_SLAVE_BUSWIDTH_4_BYTES: 14821ec1e82fSSascha Hauer bd->mode.command = 0; 14831fa81c27SSascha Hauer if (count & 3 || sg->dma_address & 3) 148457b772b8SRobin Gong goto err_bd_out; 14851fa81c27SSascha Hauer break; 14861fa81c27SSascha Hauer case DMA_SLAVE_BUSWIDTH_2_BYTES: 14871fa81c27SSascha Hauer bd->mode.command = 2; 14881fa81c27SSascha Hauer if (count & 1 || sg->dma_address & 1) 148957b772b8SRobin Gong goto err_bd_out; 14901fa81c27SSascha Hauer break; 14911fa81c27SSascha Hauer case DMA_SLAVE_BUSWIDTH_1_BYTE: 14921fa81c27SSascha Hauer bd->mode.command = 1; 14931fa81c27SSascha Hauer break; 14941fa81c27SSascha Hauer default: 149557b772b8SRobin Gong goto err_bd_out; 14961fa81c27SSascha Hauer } 14971ec1e82fSSascha Hauer 14981ec1e82fSSascha Hauer param = BD_DONE | BD_EXTD | BD_CONT; 14991ec1e82fSSascha Hauer 1500341b9419SShawn Guo if (i + 1 == sg_len) { 15011ec1e82fSSascha Hauer param |= BD_INTR; 1502341b9419SShawn Guo param |= BD_LAST; 1503341b9419SShawn Guo param &= ~BD_CONT; 15041ec1e82fSSascha Hauer } 15051ec1e82fSSascha Hauer 1506c3cc74b2SOlof Johansson dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", 1507c3cc74b2SOlof Johansson i, count, (u64)sg->dma_address, 15081ec1e82fSSascha Hauer param & BD_WRAP ? "wrap" : "", 15091ec1e82fSSascha Hauer param & BD_INTR ? " intr" : ""); 15101ec1e82fSSascha Hauer 15111ec1e82fSSascha Hauer bd->mode.status = param; 15121ec1e82fSSascha Hauer } 15131ec1e82fSSascha Hauer 151457b772b8SRobin Gong return vchan_tx_prep(&sdmac->vc, &desc->vd, flags); 151557b772b8SRobin Gong err_bd_out: 151657b772b8SRobin Gong sdma_free_bd(desc); 151757b772b8SRobin Gong kfree(desc); 15181ec1e82fSSascha Hauer err_out: 15194b2ce9ddSShawn Guo sdmac->status = DMA_ERROR; 15201ec1e82fSSascha Hauer return NULL; 15211ec1e82fSSascha Hauer } 15221ec1e82fSSascha Hauer 15231ec1e82fSSascha Hauer static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( 15241ec1e82fSSascha Hauer struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 1525185ecb5fSAlexandre Bounine size_t period_len, enum dma_transfer_direction direction, 152631c1e5a1SLaurent Pinchart unsigned long flags) 15271ec1e82fSSascha Hauer { 15281ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 15291ec1e82fSSascha Hauer struct sdma_engine *sdma = sdmac->sdma; 15301ec1e82fSSascha Hauer int num_periods = buf_len / period_len; 153123889c63SSascha Hauer int channel = sdmac->channel; 153221420841SRobin Gong int i = 0, buf = 0; 153357b772b8SRobin Gong struct sdma_desc *desc; 15341ec1e82fSSascha Hauer 15351ec1e82fSSascha Hauer dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); 15361ec1e82fSSascha Hauer 1537107d0644SVinod Koul sdma_config_write(chan, &sdmac->slave_config, direction); 1538107d0644SVinod Koul 153921420841SRobin Gong desc = sdma_transfer_init(sdmac, direction, num_periods); 154057b772b8SRobin Gong if (!desc) 154157b772b8SRobin Gong goto err_out; 154257b772b8SRobin Gong 154376c33d27SSascha Hauer desc->period_len = period_len; 15448e2e27c7SRichard Zhao 15451ec1e82fSSascha Hauer sdmac->flags |= IMX_DMA_SG_LOOP; 15461ec1e82fSSascha Hauer 15474a6b2e8aSRobin Gong if (period_len > SDMA_BD_MAX_CNT) { 1548ba6ab3b3SArvind Yadav dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n", 
15494a6b2e8aSRobin Gong channel, period_len, SDMA_BD_MAX_CNT); 155057b772b8SRobin Gong goto err_bd_out; 15511ec1e82fSSascha Hauer } 15521ec1e82fSSascha Hauer 15531ec1e82fSSascha Hauer while (buf < buf_len) { 155476c33d27SSascha Hauer struct sdma_buffer_descriptor *bd = &desc->bd[i]; 15551ec1e82fSSascha Hauer int param; 15561ec1e82fSSascha Hauer 15571ec1e82fSSascha Hauer bd->buffer_addr = dma_addr; 15581ec1e82fSSascha Hauer 15591ec1e82fSSascha Hauer bd->mode.count = period_len; 15601ec1e82fSSascha Hauer 15611ec1e82fSSascha Hauer if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) 156257b772b8SRobin Gong goto err_bd_out; 15631ec1e82fSSascha Hauer if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) 15641ec1e82fSSascha Hauer bd->mode.command = 0; 15651ec1e82fSSascha Hauer else 15661ec1e82fSSascha Hauer bd->mode.command = sdmac->word_size; 15671ec1e82fSSascha Hauer 15681ec1e82fSSascha Hauer param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR; 15691ec1e82fSSascha Hauer if (i + 1 == num_periods) 15701ec1e82fSSascha Hauer param |= BD_WRAP; 15711ec1e82fSSascha Hauer 1572ba6ab3b3SArvind Yadav dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n", 1573c3cc74b2SOlof Johansson i, period_len, (u64)dma_addr, 15741ec1e82fSSascha Hauer param & BD_WRAP ? "wrap" : "", 15751ec1e82fSSascha Hauer param & BD_INTR ? " intr" : ""); 15761ec1e82fSSascha Hauer 15771ec1e82fSSascha Hauer bd->mode.status = param; 15781ec1e82fSSascha Hauer 15791ec1e82fSSascha Hauer dma_addr += period_len; 15801ec1e82fSSascha Hauer buf += period_len; 15811ec1e82fSSascha Hauer 15821ec1e82fSSascha Hauer i++; 15831ec1e82fSSascha Hauer } 15841ec1e82fSSascha Hauer 158557b772b8SRobin Gong return vchan_tx_prep(&sdmac->vc, &desc->vd, flags); 158657b772b8SRobin Gong err_bd_out: 158757b772b8SRobin Gong sdma_free_bd(desc); 158857b772b8SRobin Gong kfree(desc); 15891ec1e82fSSascha Hauer err_out: 15901ec1e82fSSascha Hauer sdmac->status = DMA_ERROR; 15911ec1e82fSSascha Hauer return NULL; 15921ec1e82fSSascha Hauer } 15931ec1e82fSSascha Hauer 1594107d0644SVinod Koul static int sdma_config_write(struct dma_chan *chan, 1595107d0644SVinod Koul struct dma_slave_config *dmaengine_cfg, 1596107d0644SVinod Koul enum dma_transfer_direction direction) 15971ec1e82fSSascha Hauer { 15981ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 15991ec1e82fSSascha Hauer 1600107d0644SVinod Koul if (direction == DMA_DEV_TO_MEM) { 16011ec1e82fSSascha Hauer sdmac->per_address = dmaengine_cfg->src_addr; 160294ac27a5SPhilippe Rétornaz sdmac->watermark_level = dmaengine_cfg->src_maxburst * 160394ac27a5SPhilippe Rétornaz dmaengine_cfg->src_addr_width; 16041ec1e82fSSascha Hauer sdmac->word_size = dmaengine_cfg->src_addr_width; 1605107d0644SVinod Koul } else if (direction == DMA_DEV_TO_DEV) { 16068391ecf4SShengjiu Wang sdmac->per_address2 = dmaengine_cfg->src_addr; 16078391ecf4SShengjiu Wang sdmac->per_address = dmaengine_cfg->dst_addr; 16088391ecf4SShengjiu Wang sdmac->watermark_level = dmaengine_cfg->src_maxburst & 16098391ecf4SShengjiu Wang SDMA_WATERMARK_LEVEL_LWML; 16108391ecf4SShengjiu Wang sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) & 16118391ecf4SShengjiu Wang SDMA_WATERMARK_LEVEL_HWML; 16128391ecf4SShengjiu Wang sdmac->word_size = dmaengine_cfg->dst_addr_width; 16131ec1e82fSSascha Hauer } else { 16141ec1e82fSSascha Hauer sdmac->per_address = dmaengine_cfg->dst_addr; 161594ac27a5SPhilippe Rétornaz sdmac->watermark_level = dmaengine_cfg->dst_maxburst * 161694ac27a5SPhilippe Rétornaz dmaengine_cfg->dst_addr_width; 16171ec1e82fSSascha Hauer 
sdmac->word_size = dmaengine_cfg->dst_addr_width; 16181ec1e82fSSascha Hauer } 1619107d0644SVinod Koul sdmac->direction = direction; 16207b350ab0SMaxime Ripard return sdma_config_channel(chan); 16211ec1e82fSSascha Hauer } 16221ec1e82fSSascha Hauer 1623107d0644SVinod Koul static int sdma_config(struct dma_chan *chan, 1624107d0644SVinod Koul struct dma_slave_config *dmaengine_cfg) 1625107d0644SVinod Koul { 1626107d0644SVinod Koul struct sdma_channel *sdmac = to_sdma_chan(chan); 1627107d0644SVinod Koul 1628107d0644SVinod Koul memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); 1629107d0644SVinod Koul 1630107d0644SVinod Koul /* Set ENBLn earlier to make sure dma request triggered after that */ 1631107d0644SVinod Koul if (sdmac->event_id0) { 1632107d0644SVinod Koul if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) 1633107d0644SVinod Koul return -EINVAL; 1634107d0644SVinod Koul sdma_event_enable(sdmac, sdmac->event_id0); 1635107d0644SVinod Koul } 1636107d0644SVinod Koul 1637107d0644SVinod Koul if (sdmac->event_id1) { 1638107d0644SVinod Koul if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) 1639107d0644SVinod Koul return -EINVAL; 1640107d0644SVinod Koul sdma_event_enable(sdmac, sdmac->event_id1); 1641107d0644SVinod Koul } 1642107d0644SVinod Koul 1643107d0644SVinod Koul return 0; 1644107d0644SVinod Koul } 1645107d0644SVinod Koul 16461ec1e82fSSascha Hauer static enum dma_status sdma_tx_status(struct dma_chan *chan, 16471ec1e82fSSascha Hauer dma_cookie_t cookie, 16481ec1e82fSSascha Hauer struct dma_tx_state *txstate) 16491ec1e82fSSascha Hauer { 16501ec1e82fSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 165157b772b8SRobin Gong struct sdma_desc *desc; 1652d1a792f3SRussell King - ARM Linux u32 residue; 165357b772b8SRobin Gong struct virt_dma_desc *vd; 165457b772b8SRobin Gong enum dma_status ret; 165557b772b8SRobin Gong unsigned long flags; 1656d1a792f3SRussell King - ARM Linux 165757b772b8SRobin Gong ret = dma_cookie_status(chan, cookie, txstate); 165857b772b8SRobin Gong if (ret == DMA_COMPLETE || !txstate) 165957b772b8SRobin Gong return ret; 166057b772b8SRobin Gong 166157b772b8SRobin Gong spin_lock_irqsave(&sdmac->vc.lock, flags); 166257b772b8SRobin Gong vd = vchan_find_desc(&sdmac->vc, cookie); 166357b772b8SRobin Gong if (vd) { 166457b772b8SRobin Gong desc = to_sdma_desc(&vd->tx); 1665d1a792f3SRussell King - ARM Linux if (sdmac->flags & IMX_DMA_SG_LOOP) 166676c33d27SSascha Hauer residue = (desc->num_bd - desc->buf_ptail) * 166776c33d27SSascha Hauer desc->period_len - desc->chn_real_count; 1668d1a792f3SRussell King - ARM Linux else 166976c33d27SSascha Hauer residue = desc->chn_count - desc->chn_real_count; 167057b772b8SRobin Gong } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) { 167157b772b8SRobin Gong residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count; 167257b772b8SRobin Gong } else { 167357b772b8SRobin Gong residue = 0; 167457b772b8SRobin Gong } 167557b772b8SRobin Gong spin_unlock_irqrestore(&sdmac->vc.lock, flags); 16761ec1e82fSSascha Hauer 1677e8e3a790SAndy Shevchenko dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 1678d1a792f3SRussell King - ARM Linux residue); 16791ec1e82fSSascha Hauer 16808a965911SShawn Guo return sdmac->status; 16811ec1e82fSSascha Hauer } 16821ec1e82fSSascha Hauer 16831ec1e82fSSascha Hauer static void sdma_issue_pending(struct dma_chan *chan) 16841ec1e82fSSascha Hauer { 16852b4f130eSSascha Hauer struct sdma_channel *sdmac = to_sdma_chan(chan); 168657b772b8SRobin Gong unsigned long flags; 
16872b4f130eSSascha Hauer 168857b772b8SRobin Gong spin_lock_irqsave(&sdmac->vc.lock, flags); 168957b772b8SRobin Gong if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc) 169057b772b8SRobin Gong sdma_start_desc(sdmac); 169157b772b8SRobin Gong spin_unlock_irqrestore(&sdmac->vc.lock, flags); 16921ec1e82fSSascha Hauer } 16931ec1e82fSSascha Hauer 16945b28aa31SSascha Hauer #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 1695cd72b846SNicolin Chen #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 1696a572460bSFabio Estevam #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41 1697b7d2648aSFabio Estevam #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 42 16985b28aa31SSascha Hauer 16995b28aa31SSascha Hauer static void sdma_add_scripts(struct sdma_engine *sdma, 17005b28aa31SSascha Hauer const struct sdma_script_start_addrs *addr) 17015b28aa31SSascha Hauer { 17025b28aa31SSascha Hauer s32 *addr_arr = (u32 *)addr; 17035b28aa31SSascha Hauer s32 *saddr_arr = (u32 *)sdma->script_addrs; 17045b28aa31SSascha Hauer int i; 17055b28aa31SSascha Hauer 170670dabaedSNicolin Chen /* use the default firmware in ROM if missing external firmware */ 170770dabaedSNicolin Chen if (!sdma->script_number) 170870dabaedSNicolin Chen sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; 170970dabaedSNicolin Chen 1710bd73dfabSRobin Gong if (sdma->script_number > sizeof(struct sdma_script_start_addrs) 1711bd73dfabSRobin Gong / sizeof(s32)) { 1712bd73dfabSRobin Gong dev_err(sdma->dev, 1713bd73dfabSRobin Gong "SDMA script number %d not match with firmware.\n", 1714bd73dfabSRobin Gong sdma->script_number); 1715bd73dfabSRobin Gong return; 1716bd73dfabSRobin Gong } 1717bd73dfabSRobin Gong 1718cd72b846SNicolin Chen for (i = 0; i < sdma->script_number; i++) 17195b28aa31SSascha Hauer if (addr_arr[i] > 0) 17205b28aa31SSascha Hauer saddr_arr[i] = addr_arr[i]; 17215b28aa31SSascha Hauer } 17225b28aa31SSascha Hauer 17237b4b88e0SSascha Hauer static void sdma_load_firmware(const struct firmware *fw, void *context) 17245b28aa31SSascha Hauer { 17257b4b88e0SSascha Hauer struct sdma_engine *sdma = context; 17265b28aa31SSascha Hauer const struct sdma_firmware_header *header; 17275b28aa31SSascha Hauer const struct sdma_script_start_addrs *addr; 17285b28aa31SSascha Hauer unsigned short *ram_code; 17295b28aa31SSascha Hauer 17307b4b88e0SSascha Hauer if (!fw) { 17310f927a11SSascha Hauer dev_info(sdma->dev, "external firmware not found, using ROM firmware\n"); 17320f927a11SSascha Hauer /* In this case we just use the ROM firmware. 
*/ 17337b4b88e0SSascha Hauer return; 17347b4b88e0SSascha Hauer } 17355b28aa31SSascha Hauer 17365b28aa31SSascha Hauer if (fw->size < sizeof(*header)) 17375b28aa31SSascha Hauer goto err_firmware; 17385b28aa31SSascha Hauer 17395b28aa31SSascha Hauer header = (struct sdma_firmware_header *)fw->data; 17405b28aa31SSascha Hauer 17415b28aa31SSascha Hauer if (header->magic != SDMA_FIRMWARE_MAGIC) 17425b28aa31SSascha Hauer goto err_firmware; 17435b28aa31SSascha Hauer if (header->ram_code_start + header->ram_code_size > fw->size) 17445b28aa31SSascha Hauer goto err_firmware; 1745cd72b846SNicolin Chen switch (header->version_major) { 1746cd72b846SNicolin Chen case 1: 1747cd72b846SNicolin Chen sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; 1748cd72b846SNicolin Chen break; 1749cd72b846SNicolin Chen case 2: 1750cd72b846SNicolin Chen sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2; 1751cd72b846SNicolin Chen break; 1752a572460bSFabio Estevam case 3: 1753a572460bSFabio Estevam sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3; 1754a572460bSFabio Estevam break; 1755b7d2648aSFabio Estevam case 4: 1756b7d2648aSFabio Estevam sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4; 1757b7d2648aSFabio Estevam break; 1758cd72b846SNicolin Chen default: 1759cd72b846SNicolin Chen dev_err(sdma->dev, "unknown firmware version\n"); 1760cd72b846SNicolin Chen goto err_firmware; 1761cd72b846SNicolin Chen } 17625b28aa31SSascha Hauer 17635b28aa31SSascha Hauer addr = (void *)header + header->script_addrs_start; 17645b28aa31SSascha Hauer ram_code = (void *)header + header->ram_code_start; 17655b28aa31SSascha Hauer 17667560e3f3SSascha Hauer clk_enable(sdma->clk_ipg); 17677560e3f3SSascha Hauer clk_enable(sdma->clk_ahb); 17685b28aa31SSascha Hauer /* download the RAM image for SDMA */ 17695b28aa31SSascha Hauer sdma_load_script(sdma, ram_code, 17705b28aa31SSascha Hauer header->ram_code_size, 17716866fd3bSSascha Hauer addr->ram_code_start_addr); 17727560e3f3SSascha Hauer clk_disable(sdma->clk_ipg); 17737560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 17745b28aa31SSascha Hauer 17755b28aa31SSascha Hauer sdma_add_scripts(sdma, addr); 17765b28aa31SSascha Hauer 17775b28aa31SSascha Hauer dev_info(sdma->dev, "loaded firmware %d.%d\n", 17785b28aa31SSascha Hauer header->version_major, 17795b28aa31SSascha Hauer header->version_minor); 17805b28aa31SSascha Hauer 17815b28aa31SSascha Hauer err_firmware: 17825b28aa31SSascha Hauer release_firmware(fw); 17837b4b88e0SSascha Hauer } 17847b4b88e0SSascha Hauer 1785d078cd1bSZidan Wang #define EVENT_REMAP_CELLS 3 1786d078cd1bSZidan Wang 178729f493daSJason Liu static int sdma_event_remap(struct sdma_engine *sdma) 1788d078cd1bSZidan Wang { 1789d078cd1bSZidan Wang struct device_node *np = sdma->dev->of_node; 1790d078cd1bSZidan Wang struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); 1791d078cd1bSZidan Wang struct property *event_remap; 1792d078cd1bSZidan Wang struct regmap *gpr; 1793d078cd1bSZidan Wang char propname[] = "fsl,sdma-event-remap"; 1794d078cd1bSZidan Wang u32 reg, val, shift, num_map, i; 1795d078cd1bSZidan Wang int ret = 0; 1796d078cd1bSZidan Wang 1797d078cd1bSZidan Wang if (IS_ERR(np) || IS_ERR(gpr_np)) 1798d078cd1bSZidan Wang goto out; 1799d078cd1bSZidan Wang 1800d078cd1bSZidan Wang event_remap = of_find_property(np, propname, NULL); 1801d078cd1bSZidan Wang num_map = event_remap ? 
(event_remap->length / sizeof(u32)) : 0; 1802d078cd1bSZidan Wang if (!num_map) { 1803ce078af7SFabio Estevam dev_dbg(sdma->dev, "no event needs to be remapped\n"); 1804d078cd1bSZidan Wang goto out; 1805d078cd1bSZidan Wang } else if (num_map % EVENT_REMAP_CELLS) { 1806d078cd1bSZidan Wang dev_err(sdma->dev, "the property %s must modulo %d\n", 1807d078cd1bSZidan Wang propname, EVENT_REMAP_CELLS); 1808d078cd1bSZidan Wang ret = -EINVAL; 1809d078cd1bSZidan Wang goto out; 1810d078cd1bSZidan Wang } 1811d078cd1bSZidan Wang 1812d078cd1bSZidan Wang gpr = syscon_node_to_regmap(gpr_np); 1813d078cd1bSZidan Wang if (IS_ERR(gpr)) { 1814d078cd1bSZidan Wang dev_err(sdma->dev, "failed to get gpr regmap\n"); 1815d078cd1bSZidan Wang ret = PTR_ERR(gpr); 1816d078cd1bSZidan Wang goto out; 1817d078cd1bSZidan Wang } 1818d078cd1bSZidan Wang 1819d078cd1bSZidan Wang for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) { 1820d078cd1bSZidan Wang ret = of_property_read_u32_index(np, propname, i, ®); 1821d078cd1bSZidan Wang if (ret) { 1822d078cd1bSZidan Wang dev_err(sdma->dev, "failed to read property %s index %d\n", 1823d078cd1bSZidan Wang propname, i); 1824d078cd1bSZidan Wang goto out; 1825d078cd1bSZidan Wang } 1826d078cd1bSZidan Wang 1827d078cd1bSZidan Wang ret = of_property_read_u32_index(np, propname, i + 1, &shift); 1828d078cd1bSZidan Wang if (ret) { 1829d078cd1bSZidan Wang dev_err(sdma->dev, "failed to read property %s index %d\n", 1830d078cd1bSZidan Wang propname, i + 1); 1831d078cd1bSZidan Wang goto out; 1832d078cd1bSZidan Wang } 1833d078cd1bSZidan Wang 1834d078cd1bSZidan Wang ret = of_property_read_u32_index(np, propname, i + 2, &val); 1835d078cd1bSZidan Wang if (ret) { 1836d078cd1bSZidan Wang dev_err(sdma->dev, "failed to read property %s index %d\n", 1837d078cd1bSZidan Wang propname, i + 2); 1838d078cd1bSZidan Wang goto out; 1839d078cd1bSZidan Wang } 1840d078cd1bSZidan Wang 1841d078cd1bSZidan Wang regmap_update_bits(gpr, reg, BIT(shift), val << shift); 1842d078cd1bSZidan Wang } 1843d078cd1bSZidan Wang 1844d078cd1bSZidan Wang out: 1845d078cd1bSZidan Wang if (!IS_ERR(gpr_np)) 1846d078cd1bSZidan Wang of_node_put(gpr_np); 1847d078cd1bSZidan Wang 1848d078cd1bSZidan Wang return ret; 1849d078cd1bSZidan Wang } 1850d078cd1bSZidan Wang 1851fe6cf289SArnd Bergmann static int sdma_get_firmware(struct sdma_engine *sdma, 18527b4b88e0SSascha Hauer const char *fw_name) 18537b4b88e0SSascha Hauer { 18547b4b88e0SSascha Hauer int ret; 18557b4b88e0SSascha Hauer 18567b4b88e0SSascha Hauer ret = request_firmware_nowait(THIS_MODULE, 18577b4b88e0SSascha Hauer FW_ACTION_HOTPLUG, fw_name, sdma->dev, 18587b4b88e0SSascha Hauer GFP_KERNEL, sdma, sdma_load_firmware); 18595b28aa31SSascha Hauer 18605b28aa31SSascha Hauer return ret; 18615b28aa31SSascha Hauer } 18625b28aa31SSascha Hauer 186319bfc772SJingoo Han static int sdma_init(struct sdma_engine *sdma) 18641ec1e82fSSascha Hauer { 18651ec1e82fSSascha Hauer int i, ret; 18661ec1e82fSSascha Hauer dma_addr_t ccb_phys; 18671ec1e82fSSascha Hauer 1868b93edcddSFabio Estevam ret = clk_enable(sdma->clk_ipg); 1869b93edcddSFabio Estevam if (ret) 1870b93edcddSFabio Estevam return ret; 1871b93edcddSFabio Estevam ret = clk_enable(sdma->clk_ahb); 1872b93edcddSFabio Estevam if (ret) 1873b93edcddSFabio Estevam goto disable_clk_ipg; 18741ec1e82fSSascha Hauer 1875941acd56SAngus Ainslie (Purism) if (sdma->drvdata->check_ratio && 1876941acd56SAngus Ainslie (Purism) (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))) 187725aaa75dSAngus Ainslie (Purism) sdma->clk_ratio = 1; 187825aaa75dSAngus Ainslie (Purism) 
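/*
 * clk_ratio records that the AHB and IPG clocks run at the same rate; in
 * that case the ACR bit is written to SDMA_H_CONFIG further down instead
 * of 0, which presumably tells the SDMA core that its clock equals the
 * AHB clock.
 */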
18791ec1e82fSSascha Hauer /* Be sure SDMA has not started yet */ 1880c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); 18811ec1e82fSSascha Hauer 1882ceaf5226SAndy Duan sdma->channel_control = dma_alloc_coherent(sdma->dev, 18831ec1e82fSSascha Hauer MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + 18841ec1e82fSSascha Hauer sizeof(struct sdma_context_data), 18851ec1e82fSSascha Hauer &ccb_phys, GFP_KERNEL); 18861ec1e82fSSascha Hauer 18871ec1e82fSSascha Hauer if (!sdma->channel_control) { 18881ec1e82fSSascha Hauer ret = -ENOMEM; 18891ec1e82fSSascha Hauer goto err_dma_alloc; 18901ec1e82fSSascha Hauer } 18911ec1e82fSSascha Hauer 18921ec1e82fSSascha Hauer sdma->context = (void *)sdma->channel_control + 18931ec1e82fSSascha Hauer MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 18941ec1e82fSSascha Hauer sdma->context_phys = ccb_phys + 18951ec1e82fSSascha Hauer MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 18961ec1e82fSSascha Hauer 18971ec1e82fSSascha Hauer /* disable all channels */ 189817bba72fSSascha Hauer for (i = 0; i < sdma->drvdata->num_events; i++) 1899c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i)); 19001ec1e82fSSascha Hauer 19011ec1e82fSSascha Hauer /* All channels have priority 0 */ 19021ec1e82fSSascha Hauer for (i = 0; i < MAX_DMA_CHANNELS; i++) 1903c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); 19041ec1e82fSSascha Hauer 190557b772b8SRobin Gong ret = sdma_request_channel0(sdma); 19061ec1e82fSSascha Hauer if (ret) 19071ec1e82fSSascha Hauer goto err_dma_alloc; 19081ec1e82fSSascha Hauer 19091ec1e82fSSascha Hauer sdma_config_ownership(&sdma->channel[0], false, true, false); 19101ec1e82fSSascha Hauer 19111ec1e82fSSascha Hauer /* Set Command Channel (Channel Zero) */ 1912c4b56857SRichard Zhao writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); 19131ec1e82fSSascha Hauer 19141ec1e82fSSascha Hauer /* Set bits of CONFIG register but with static context switching */ 191525aaa75dSAngus Ainslie (Purism) if (sdma->clk_ratio) 191625aaa75dSAngus Ainslie (Purism) writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG); 191725aaa75dSAngus Ainslie (Purism) else 1918c4b56857SRichard Zhao writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); 19191ec1e82fSSascha Hauer 1920c4b56857SRichard Zhao writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); 19211ec1e82fSSascha Hauer 19221ec1e82fSSascha Hauer /* Initializes channel's priorities */ 19231ec1e82fSSascha Hauer sdma_set_channel_priority(&sdma->channel[0], 7); 19241ec1e82fSSascha Hauer 19257560e3f3SSascha Hauer clk_disable(sdma->clk_ipg); 19267560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 19271ec1e82fSSascha Hauer 19281ec1e82fSSascha Hauer return 0; 19291ec1e82fSSascha Hauer 19301ec1e82fSSascha Hauer err_dma_alloc: 19317560e3f3SSascha Hauer clk_disable(sdma->clk_ahb); 1932b93edcddSFabio Estevam disable_clk_ipg: 1933b93edcddSFabio Estevam clk_disable(sdma->clk_ipg); 19341ec1e82fSSascha Hauer dev_err(sdma->dev, "initialisation failed with %d\n", ret); 19351ec1e82fSSascha Hauer return ret; 19361ec1e82fSSascha Hauer } 19371ec1e82fSSascha Hauer 19389479e17cSShawn Guo static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) 19399479e17cSShawn Guo { 19400b351865SNicolin Chen struct sdma_channel *sdmac = to_sdma_chan(chan); 19419479e17cSShawn Guo struct imx_dma_data *data = fn_param; 19429479e17cSShawn Guo 19439479e17cSShawn Guo if (!imx_dma_is_general_purpose(chan)) 19449479e17cSShawn Guo return false; 19459479e17cSShawn Guo 19460b351865SNicolin Chen 
sdmac->data = *data; 19470b351865SNicolin Chen chan->private = &sdmac->data; 19489479e17cSShawn Guo 19499479e17cSShawn Guo return true; 19509479e17cSShawn Guo } 19519479e17cSShawn Guo 19529479e17cSShawn Guo static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, 19539479e17cSShawn Guo struct of_dma *ofdma) 19549479e17cSShawn Guo { 19559479e17cSShawn Guo struct sdma_engine *sdma = ofdma->of_dma_data; 19569479e17cSShawn Guo dma_cap_mask_t mask = sdma->dma_device.cap_mask; 19579479e17cSShawn Guo struct imx_dma_data data; 19589479e17cSShawn Guo 19599479e17cSShawn Guo if (dma_spec->args_count != 3) 19609479e17cSShawn Guo return NULL; 19619479e17cSShawn Guo 19629479e17cSShawn Guo data.dma_request = dma_spec->args[0]; 19639479e17cSShawn Guo data.peripheral_type = dma_spec->args[1]; 19649479e17cSShawn Guo data.priority = dma_spec->args[2]; 19658391ecf4SShengjiu Wang /* 19668391ecf4SShengjiu Wang * init dma_request2 to zero, which is not used by the dts. 19678391ecf4SShengjiu Wang * For P2P, dma_request2 is init from dma_request_channel(), 19688391ecf4SShengjiu Wang * chan->private will point to the imx_dma_data, and in 19698391ecf4SShengjiu Wang * device_alloc_chan_resources(), imx_dma_data.dma_request2 will 19708391ecf4SShengjiu Wang * be set to sdmac->event_id1. 19718391ecf4SShengjiu Wang */ 19728391ecf4SShengjiu Wang data.dma_request2 = 0; 19739479e17cSShawn Guo 1974990c0b53SBaolin Wang return __dma_request_channel(&mask, sdma_filter_fn, &data, 1975990c0b53SBaolin Wang ofdma->of_node); 19769479e17cSShawn Guo } 19779479e17cSShawn Guo 1978e34b731fSMark Brown static int sdma_probe(struct platform_device *pdev) 19791ec1e82fSSascha Hauer { 1980580975d7SShawn Guo const struct of_device_id *of_id = 1981580975d7SShawn Guo of_match_device(sdma_dt_ids, &pdev->dev); 1982580975d7SShawn Guo struct device_node *np = pdev->dev.of_node; 19838391ecf4SShengjiu Wang struct device_node *spba_bus; 1984580975d7SShawn Guo const char *fw_name; 19851ec1e82fSSascha Hauer int ret; 19861ec1e82fSSascha Hauer int irq; 19871ec1e82fSSascha Hauer struct resource *iores; 19888391ecf4SShengjiu Wang struct resource spba_res; 1989d4adcc01SJingoo Han struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev); 19901ec1e82fSSascha Hauer int i; 19911ec1e82fSSascha Hauer struct sdma_engine *sdma; 199236e2f21aSSascha Hauer s32 *saddr_arr; 199317bba72fSSascha Hauer const struct sdma_driver_data *drvdata = NULL; 199417bba72fSSascha Hauer 199517bba72fSSascha Hauer if (of_id) 199617bba72fSSascha Hauer drvdata = of_id->data; 199717bba72fSSascha Hauer else if (pdev->id_entry) 199817bba72fSSascha Hauer drvdata = (void *)pdev->id_entry->driver_data; 199917bba72fSSascha Hauer 200017bba72fSSascha Hauer if (!drvdata) { 200117bba72fSSascha Hauer dev_err(&pdev->dev, "unable to find driver data\n"); 200217bba72fSSascha Hauer return -EINVAL; 200317bba72fSSascha Hauer } 20041ec1e82fSSascha Hauer 200542536b9fSPhilippe Retornaz ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 200642536b9fSPhilippe Retornaz if (ret) 200742536b9fSPhilippe Retornaz return ret; 200842536b9fSPhilippe Retornaz 20097f24e0eeSFabio Estevam sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL); 20101ec1e82fSSascha Hauer if (!sdma) 20111ec1e82fSSascha Hauer return -ENOMEM; 20121ec1e82fSSascha Hauer 20132ccaef05SRichard Zhao spin_lock_init(&sdma->channel_0_lock); 201473eab978SSascha Hauer 20151ec1e82fSSascha Hauer sdma->dev = &pdev->dev; 201617bba72fSSascha Hauer sdma->drvdata = drvdata; 20171ec1e82fSSascha Hauer 20181ec1e82fSSascha Hauer irq 
= platform_get_irq(pdev, 0); 20197f24e0eeSFabio Estevam if (irq < 0) 202063c72e02SFabio Estevam return irq; 20211ec1e82fSSascha Hauer 20227f24e0eeSFabio Estevam iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 20237f24e0eeSFabio Estevam sdma->regs = devm_ioremap_resource(&pdev->dev, iores); 20247f24e0eeSFabio Estevam if (IS_ERR(sdma->regs)) 20257f24e0eeSFabio Estevam return PTR_ERR(sdma->regs); 20261ec1e82fSSascha Hauer 20277560e3f3SSascha Hauer sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 20287f24e0eeSFabio Estevam if (IS_ERR(sdma->clk_ipg)) 20297f24e0eeSFabio Estevam return PTR_ERR(sdma->clk_ipg); 20301ec1e82fSSascha Hauer 20317560e3f3SSascha Hauer sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 20327f24e0eeSFabio Estevam if (IS_ERR(sdma->clk_ahb)) 20337f24e0eeSFabio Estevam return PTR_ERR(sdma->clk_ahb); 20347560e3f3SSascha Hauer 2035fb9caf37SArvind Yadav ret = clk_prepare(sdma->clk_ipg); 2036fb9caf37SArvind Yadav if (ret) 2037fb9caf37SArvind Yadav return ret; 2038fb9caf37SArvind Yadav 2039fb9caf37SArvind Yadav ret = clk_prepare(sdma->clk_ahb); 2040fb9caf37SArvind Yadav if (ret) 2041fb9caf37SArvind Yadav goto err_clk; 20427560e3f3SSascha Hauer 20437f24e0eeSFabio Estevam ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma", 20447f24e0eeSFabio Estevam sdma); 20451ec1e82fSSascha Hauer if (ret) 2046fb9caf37SArvind Yadav goto err_irq; 20471ec1e82fSSascha Hauer 20485bb9dbb5SVinod Koul sdma->irq = irq; 20495bb9dbb5SVinod Koul 20505b28aa31SSascha Hauer sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); 2051fb9caf37SArvind Yadav if (!sdma->script_addrs) { 2052fb9caf37SArvind Yadav ret = -ENOMEM; 2053fb9caf37SArvind Yadav goto err_irq; 2054fb9caf37SArvind Yadav } 20551ec1e82fSSascha Hauer 205636e2f21aSSascha Hauer /* initially no scripts available */ 205736e2f21aSSascha Hauer saddr_arr = (s32 *)sdma->script_addrs; 205836e2f21aSSascha Hauer for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) 205936e2f21aSSascha Hauer saddr_arr[i] = -EINVAL; 206036e2f21aSSascha Hauer 20617214a8b1SSascha Hauer dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); 20627214a8b1SSascha Hauer dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); 20630f06c027SRobin Gong dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask); 20647214a8b1SSascha Hauer 20651ec1e82fSSascha Hauer INIT_LIST_HEAD(&sdma->dma_device.channels); 20661ec1e82fSSascha Hauer /* Initialize channel parameters */ 20671ec1e82fSSascha Hauer for (i = 0; i < MAX_DMA_CHANNELS; i++) { 20681ec1e82fSSascha Hauer struct sdma_channel *sdmac = &sdma->channel[i]; 20691ec1e82fSSascha Hauer 20701ec1e82fSSascha Hauer sdmac->sdma = sdma; 20711ec1e82fSSascha Hauer 20721ec1e82fSSascha Hauer sdmac->channel = i; 207357b772b8SRobin Gong sdmac->vc.desc_free = sdma_desc_free; 2074b8603d2aSLucas Stach INIT_WORK(&sdmac->terminate_worker, 2075b8603d2aSLucas Stach sdma_channel_terminate_work); 207623889c63SSascha Hauer /* 207723889c63SSascha Hauer * Add the channel to the DMAC list. Do not add channel 0 though 207823889c63SSascha Hauer * because we need it internally in the SDMA driver. This also means 207923889c63SSascha Hauer * that channel 0 in dmaengine counting matches sdma channel 1. 
208023889c63SSascha Hauer */ 208123889c63SSascha Hauer if (i) 208257b772b8SRobin Gong vchan_init(&sdmac->vc, &sdma->dma_device); 20831ec1e82fSSascha Hauer } 20841ec1e82fSSascha Hauer 20855b28aa31SSascha Hauer ret = sdma_init(sdma); 20861ec1e82fSSascha Hauer if (ret) 20871ec1e82fSSascha Hauer goto err_init; 20881ec1e82fSSascha Hauer 2089d078cd1bSZidan Wang ret = sdma_event_remap(sdma); 2090d078cd1bSZidan Wang if (ret) 2091d078cd1bSZidan Wang goto err_init; 2092d078cd1bSZidan Wang 2093dcfec3c0SSascha Hauer if (sdma->drvdata->script_addrs) 2094dcfec3c0SSascha Hauer sdma_add_scripts(sdma, sdma->drvdata->script_addrs); 2095580975d7SShawn Guo if (pdata && pdata->script_addrs) 20965b28aa31SSascha Hauer sdma_add_scripts(sdma, pdata->script_addrs); 20975b28aa31SSascha Hauer 20981ec1e82fSSascha Hauer sdma->dma_device.dev = &pdev->dev; 20991ec1e82fSSascha Hauer 21001ec1e82fSSascha Hauer sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; 21011ec1e82fSSascha Hauer sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources; 21021ec1e82fSSascha Hauer sdma->dma_device.device_tx_status = sdma_tx_status; 21031ec1e82fSSascha Hauer sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; 21041ec1e82fSSascha Hauer sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; 21057b350ab0SMaxime Ripard sdma->dma_device.device_config = sdma_config; 2106*a80f2787SSascha Hauer sdma->dma_device.device_terminate_all = sdma_terminate_all; 2107b8603d2aSLucas Stach sdma->dma_device.device_synchronize = sdma_channel_synchronize; 2108f9d4a398SNicolin Chen sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; 2109f9d4a398SNicolin Chen sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; 2110f9d4a398SNicolin Chen sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; 21116f3125ceSLucas Stach sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 21120f06c027SRobin Gong sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy; 21131ec1e82fSSascha Hauer sdma->dma_device.device_issue_pending = sdma_issue_pending; 2114b9b3f82fSSascha Hauer sdma->dma_device.dev->dma_parms = &sdma->dma_parms; 2115a3711d49SAngus Ainslie (Purism) sdma->dma_device.copy_align = 2; 21164a6b2e8aSRobin Gong dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT); 21171ec1e82fSSascha Hauer 211823e11811SVignesh Raman platform_set_drvdata(pdev, sdma); 211923e11811SVignesh Raman 21201ec1e82fSSascha Hauer ret = dma_async_device_register(&sdma->dma_device); 21211ec1e82fSSascha Hauer if (ret) { 21221ec1e82fSSascha Hauer dev_err(&pdev->dev, "unable to register\n"); 21231ec1e82fSSascha Hauer goto err_init; 21241ec1e82fSSascha Hauer } 21251ec1e82fSSascha Hauer 21269479e17cSShawn Guo if (np) { 21279479e17cSShawn Guo ret = of_dma_controller_register(np, sdma_xlate, sdma); 21289479e17cSShawn Guo if (ret) { 21299479e17cSShawn Guo dev_err(&pdev->dev, "failed to register controller\n"); 21309479e17cSShawn Guo goto err_register; 21319479e17cSShawn Guo } 21328391ecf4SShengjiu Wang 21338391ecf4SShengjiu Wang spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus"); 21348391ecf4SShengjiu Wang ret = of_address_to_resource(spba_bus, 0, &spba_res); 21358391ecf4SShengjiu Wang if (!ret) { 21368391ecf4SShengjiu Wang sdma->spba_start_addr = spba_res.start; 21378391ecf4SShengjiu Wang sdma->spba_end_addr = spba_res.end; 21388391ecf4SShengjiu Wang } 21398391ecf4SShengjiu Wang of_node_put(spba_bus); 21409479e17cSShawn Guo } 21419479e17cSShawn Guo 21422b8066c3SSven Van Asbroeck /* 21432b8066c3SSven Van Asbroeck * 
Kick off firmware loading as the very last step: 21442b8066c3SSven Van Asbroeck * attempt to load firmware only if we're not on the error path, because 21452b8066c3SSven Van Asbroeck * the firmware callback requires a fully functional and allocated sdma 21462b8066c3SSven Van Asbroeck * instance. 21472b8066c3SSven Van Asbroeck */ 21482b8066c3SSven Van Asbroeck if (pdata) { 21492b8066c3SSven Van Asbroeck ret = sdma_get_firmware(sdma, pdata->fw_name); 21502b8066c3SSven Van Asbroeck if (ret) 21512b8066c3SSven Van Asbroeck dev_warn(&pdev->dev, "failed to get firmware from platform data\n"); 21522b8066c3SSven Van Asbroeck } else { 21532b8066c3SSven Van Asbroeck /* 21542b8066c3SSven Van Asbroeck * Because that device tree does not encode ROM script address, 21552b8066c3SSven Van Asbroeck * the RAM script in firmware is mandatory for device tree 21562b8066c3SSven Van Asbroeck * probe, otherwise it fails. 21572b8066c3SSven Van Asbroeck */ 21582b8066c3SSven Van Asbroeck ret = of_property_read_string(np, "fsl,sdma-ram-script-name", 21592b8066c3SSven Van Asbroeck &fw_name); 21602b8066c3SSven Van Asbroeck if (ret) { 21612b8066c3SSven Van Asbroeck dev_warn(&pdev->dev, "failed to get firmware name\n"); 21622b8066c3SSven Van Asbroeck } else { 21632b8066c3SSven Van Asbroeck ret = sdma_get_firmware(sdma, fw_name); 21642b8066c3SSven Van Asbroeck if (ret) 21652b8066c3SSven Van Asbroeck dev_warn(&pdev->dev, "failed to get firmware from device tree\n"); 21662b8066c3SSven Van Asbroeck } 21672b8066c3SSven Van Asbroeck } 21682b8066c3SSven Van Asbroeck 21691ec1e82fSSascha Hauer return 0; 21701ec1e82fSSascha Hauer 21719479e17cSShawn Guo err_register: 21729479e17cSShawn Guo dma_async_device_unregister(&sdma->dma_device); 21731ec1e82fSSascha Hauer err_init: 21741ec1e82fSSascha Hauer kfree(sdma->script_addrs); 2175fb9caf37SArvind Yadav err_irq: 2176fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ahb); 2177fb9caf37SArvind Yadav err_clk: 2178fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ipg); 2179939fd4f0SShawn Guo return ret; 21801ec1e82fSSascha Hauer } 21811ec1e82fSSascha Hauer 21821d1bbd30SMaxin B. John static int sdma_remove(struct platform_device *pdev) 21831ec1e82fSSascha Hauer { 218423e11811SVignesh Raman struct sdma_engine *sdma = platform_get_drvdata(pdev); 2185c12fe497SVignesh Raman int i; 218623e11811SVignesh Raman 21875bb9dbb5SVinod Koul devm_free_irq(&pdev->dev, sdma->irq, sdma); 218823e11811SVignesh Raman dma_async_device_unregister(&sdma->dma_device); 218923e11811SVignesh Raman kfree(sdma->script_addrs); 2190fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ahb); 2191fb9caf37SArvind Yadav clk_unprepare(sdma->clk_ipg); 2192c12fe497SVignesh Raman /* Kill the tasklet */ 2193c12fe497SVignesh Raman for (i = 0; i < MAX_DMA_CHANNELS; i++) { 2194c12fe497SVignesh Raman struct sdma_channel *sdmac = &sdma->channel[i]; 2195c12fe497SVignesh Raman 219657b772b8SRobin Gong tasklet_kill(&sdmac->vc.task); 219757b772b8SRobin Gong sdma_free_chan_resources(&sdmac->vc.chan); 2198c12fe497SVignesh Raman } 219923e11811SVignesh Raman 220023e11811SVignesh Raman platform_set_drvdata(pdev, NULL); 220123e11811SVignesh Raman return 0; 22021ec1e82fSSascha Hauer } 22031ec1e82fSSascha Hauer 22041ec1e82fSSascha Hauer static struct platform_driver sdma_driver = { 22051ec1e82fSSascha Hauer .driver = { 22061ec1e82fSSascha Hauer .name = "imx-sdma", 2207580975d7SShawn Guo .of_match_table = sdma_dt_ids, 22081ec1e82fSSascha Hauer }, 220962550cd7SShawn Guo .id_table = sdma_devtypes, 22101d1bbd30SMaxin B. 
John .remove = sdma_remove, 221123e11811SVignesh Raman .probe = sdma_probe, 22121ec1e82fSSascha Hauer }; 22131ec1e82fSSascha Hauer 221423e11811SVignesh Raman module_platform_driver(sdma_driver); 22151ec1e82fSSascha Hauer 22161ec1e82fSSascha Hauer MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); 22171ec1e82fSSascha Hauer MODULE_DESCRIPTION("i.MX SDMA driver"); 2218c0879342SNicolas Chauvet #if IS_ENABLED(CONFIG_SOC_IMX6Q) 2219c0879342SNicolas Chauvet MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin"); 2220c0879342SNicolas Chauvet #endif 2221c0879342SNicolas Chauvet #if IS_ENABLED(CONFIG_SOC_IMX7D) 2222c0879342SNicolas Chauvet MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin"); 2223c0879342SNicolas Chauvet #endif 22241ec1e82fSSascha Hauer MODULE_LICENSE("GPL"); 2225
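/*
 * Illustrative sketch, not part of the driver above: the device_config,
 * device_prep_slave_sg and device_issue_pending callbacks registered in
 * sdma_probe() are reached through the generic dmaengine slave API, with
 * the SDMA request line and peripheral type taken from the client's three
 * dma cells by sdma_xlate(). The fragment below shows how a client driver
 * might typically drive one SDMA channel; the "rx" channel name, the FIFO
 * address and the example_start_rx() helper are assumptions made for this
 * example only, while the dmaengine calls are the standard kernel slave
 * DMA API.
 */
#if 0	/* example only, never compiled as part of imx-sdma.c */
#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_start_rx(struct device *dev, dma_addr_t fifo_addr,
			    dma_addr_t buf, size_t len,
			    dma_async_tx_callback done, void *ctx)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	/* "rx" must match a dma-names entry in the client's DT node */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* stored by sdma_config(), applied later via sdma_config_write() */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto release;

	/* single-entry wrapper that ends up in sdma_prep_slave_sg() */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EINVAL;
		goto release;
	}

	desc->callback = done;
	desc->callback_param = ctx;
	dmaengine_submit(desc);

	/* ends up in sdma_issue_pending(), which starts the descriptor */
	dma_async_issue_pending(chan);
	return 0;

release:
	dma_release_channel(chan);
	return ret;
}
#endif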