// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013,2018,2020-2021 Intel Corporation

#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "internal.h"

#define DMA_CTL_CH(x)			(0x1000 + (x) * 4)
#define DMA_SRC_ADDR_FILLIN(x)		(0x1100 + (x) * 4)
#define DMA_DST_ADDR_FILLIN(x)		(0x1200 + (x) * 4)
#define DMA_XBAR_SEL(x)			(0x1300 + (x) * 4)
#define DMA_REGACCESS_CHID_CFG		(0x1400)

#define CTL_CH_TRANSFER_MODE_MASK	GENMASK(1, 0)
#define CTL_CH_TRANSFER_MODE_S2S	0
#define CTL_CH_TRANSFER_MODE_S2D	1
#define CTL_CH_TRANSFER_MODE_D2S	2
#define CTL_CH_TRANSFER_MODE_D2D	3
#define CTL_CH_RD_RS_MASK		GENMASK(4, 3)
#define CTL_CH_WR_RS_MASK		GENMASK(6, 5)
#define CTL_CH_RD_NON_SNOOP_BIT		BIT(8)
#define CTL_CH_WR_NON_SNOOP_BIT		BIT(9)

#define XBAR_SEL_DEVID_MASK		GENMASK(15, 0)
#define XBAR_SEL_RX_TX_BIT		BIT(16)
#define XBAR_SEL_RX_TX_SHIFT		16

#define REGACCESS_CHID_MASK		GENMASK(2, 0)

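/*
 * Look up the PCI device/function number of the channel's slave device;
 * it is used below to select the request routing in the crossbar.
 * Non-PCI or missing slaves map to devfn 0.
 */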
static unsigned int idma32_get_slave_devfn(struct dw_dma_chan *dwc)
{
	struct device *slave = dwc->chan.slave;

	if (!slave || !dev_is_pci(slave))
		return 0;

	return to_pci_dev(slave)->devfn;
}

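/*
 * Program the per-channel crossbar (xBAR) routing, transfer mode and snoop
 * bits, then the CFG_LO/CFG_HI handshake interface. Used on controllers
 * that have the DMA request crossbar (DW_DMA_QUIRK_XBAR_PRESENT).
 */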
static void idma32_initialize_chan_xbar(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	void __iomem *misc = __dw_regs(dw);
	u32 cfghi = 0, cfglo = 0;
	u8 dst_id, src_id;
	u32 value;

	/* DMA Channel ID Configuration register must be programmed first */
	value = readl(misc + DMA_REGACCESS_CHID_CFG);

	value &= ~REGACCESS_CHID_MASK;
	value |= dwc->chan.chan_id;

	writel(value, misc + DMA_REGACCESS_CHID_CFG);

	/* Configure channel attributes */
	value = readl(misc + DMA_CTL_CH(dwc->chan.chan_id));

	value &= ~(CTL_CH_RD_NON_SNOOP_BIT | CTL_CH_WR_NON_SNOOP_BIT);
	value &= ~(CTL_CH_RD_RS_MASK | CTL_CH_WR_RS_MASK);
	value &= ~CTL_CH_TRANSFER_MODE_MASK;

	switch (dwc->direction) {
	case DMA_MEM_TO_DEV:
		value |= CTL_CH_TRANSFER_MODE_D2S;
		value |= CTL_CH_WR_NON_SNOOP_BIT;
		break;
	case DMA_DEV_TO_MEM:
		value |= CTL_CH_TRANSFER_MODE_S2D;
		value |= CTL_CH_RD_NON_SNOOP_BIT;
		break;
	default:
		/*
		 * Memory-to-Memory and Device-to-Device are ignored for now.
		 *
		 * For Memory-to-Memory transfers we would need to set mode
		 * and disable snooping on both sides.
		 */
		return;
	}

	writel(value, misc + DMA_CTL_CH(dwc->chan.chan_id));

	/* Configure crossbar selection */
	value = readl(misc + DMA_XBAR_SEL(dwc->chan.chan_id));

	/* DEVFN selection */
	value &= ~XBAR_SEL_DEVID_MASK;
	value |= idma32_get_slave_devfn(dwc);

	switch (dwc->direction) {
	case DMA_MEM_TO_DEV:
		value |= XBAR_SEL_RX_TX_BIT;
		break;
	case DMA_DEV_TO_MEM:
		value &= ~XBAR_SEL_RX_TX_BIT;
		break;
	default:
		/* Memory-to-Memory and Device-to-Device are ignored for now */
		return;
	}

	writel(value, misc + DMA_XBAR_SEL(dwc->chan.chan_id));

	/* Configure DMA channel low and high registers */
	switch (dwc->direction) {
	case DMA_MEM_TO_DEV:
		dst_id = dwc->chan.chan_id;
		src_id = dwc->dws.src_id;
		break;
	case DMA_DEV_TO_MEM:
		dst_id = dwc->dws.dst_id;
		src_id = dwc->chan.chan_id;
		break;
	default:
		/* Memory-to-Memory and Device-to-Device are ignored for now */
		return;
	}

	/* Set default burst alignment */
	cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;

	/* Low 4 bits of the request lines */
	cfghi |= IDMA32C_CFGH_DST_PER(dst_id & 0xf);
	cfghi |= IDMA32C_CFGH_SRC_PER(src_id & 0xf);

	/* Request line extension (2 bits) */
	cfghi |= IDMA32C_CFGH_DST_PER_EXT(dst_id >> 4 & 0x3);
	cfghi |= IDMA32C_CFGH_SRC_PER_EXT(src_id >> 4 & 0x3);

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);
}

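/*
 * Program CFG_LO/CFG_HI only: default burst alignment plus the 6-bit
 * request line numbers taken from the slave data (no crossbar here).
 */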
static void idma32_initialize_chan_generic(struct dw_dma_chan *dwc)
{
	u32 cfghi = 0;
	u32 cfglo = 0;

	/* Set default burst alignment */
	cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;

	/* Low 4 bits of the request lines */
	cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
	cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);

	/* Request line extension (2 bits) */
	cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
	cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);
}

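/* Suspend the channel, optionally draining whatever is left in its FIFO */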
static void idma32_suspend_chan(struct dw_dma_chan *dwc, bool drain)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	if (drain)
		cfglo |= IDMA32C_CFGL_CH_DRAIN;

	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
}

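/* Resume the channel and drop the drain request, if one was made */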
static void idma32_resume_chan(struct dw_dma_chan *dwc, bool drain)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	if (drain)
		cfglo &= ~IDMA32C_CFGL_CH_DRAIN;

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
}

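/*
 * On iDMA 32-bit the block length (BLOCK_TS) is expressed in bytes rather
 * than in units of the transfer width, hence @width is unused here. Clamp
 * the requested length to the maximum block size of the channel.
 */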
static u32 idma32_bytes2block(struct dw_dma_chan *dwc,
			      size_t bytes, unsigned int width, size_t *len)
{
	u32 block;

	if (bytes > dwc->block_size) {
		block = dwc->block_size;
		*len = dwc->block_size;
	} else {
		block = bytes;
		*len = bytes;
	}

	return block;
}

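/* The converse of the above: extract BLOCK_TS and return it as bytes */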
static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
{
	return IDMA32C_CTLH_BLOCK_TS(block);
}

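/*
 * Build the common CTL_LO bits: LLP updates on both sides plus the burst
 * size (MSIZE) on whichever side talks to the peripheral.
 */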
static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc)
{
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0;
	u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0;

	return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
	       DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize);
}

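/* Encode the burst length as the power-of-two exponent the hardware takes */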
static void idma32_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
{
	*maxburst = *maxburst > 1 ? fls(*maxburst) - 1 : 0;
}

static void idma32_set_device_name(struct dw_dma *dw, int id)
{
	snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", id);
}

/*
 * Program the FIFO size of the channels.
 *
 * By default the full FIFO (512 bytes) is assigned to channel 0. Here we
 * slice the FIFO into equal parts between the channels.
 */
static void idma32_fifo_partition(struct dw_dma *dw)
{
	u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
		    IDMA32C_FP_UPDATE;
	u64 fifo_partition = 0;

	/* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
	fifo_partition |= value << 0;

	/* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
	fifo_partition |= value << 32;

	/* Program FIFO Partition registers - 64 bytes per channel */
	idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
	idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
}

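/* Controller off/on callbacks; both reapply the FIFO partitioning */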
static void idma32_disable(struct dw_dma *dw)
{
	do_dw_dma_off(dw);
	idma32_fifo_partition(dw);
}

static void idma32_enable(struct dw_dma *dw)
{
	idma32_fifo_partition(dw);
	do_dw_dma_on(dw);
}

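/*
 * Fill in the iDMA 32-bit specific callbacks and hand the chip over to the
 * common DesignWare DMA probe path.
 */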
int idma32_dma_probe(struct dw_dma_chip *chip)
{
	struct dw_dma *dw;

	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	/* Channel operations */
	if (chip->pdata->quirks & DW_DMA_QUIRK_XBAR_PRESENT)
		dw->initialize_chan = idma32_initialize_chan_xbar;
	else
		dw->initialize_chan = idma32_initialize_chan_generic;
	dw->suspend_chan = idma32_suspend_chan;
	dw->resume_chan = idma32_resume_chan;
	dw->prepare_ctllo = idma32_prepare_ctllo;
	dw->encode_maxburst = idma32_encode_maxburst;
	dw->bytes2block = idma32_bytes2block;
	dw->block2bytes = idma32_block2bytes;

	/* Device operations */
	dw->set_device_name = idma32_set_device_name;
	dw->disable = idma32_disable;
	dw->enable = idma32_enable;

	chip->dw = dw;
	return do_dma_probe(chip);
}
EXPORT_SYMBOL_GPL(idma32_dma_probe);

int idma32_dma_remove(struct dw_dma_chip *chip)
{
	return do_dma_remove(chip);
}
EXPORT_SYMBOL_GPL(idma32_dma_remove);