xref: /openbmc/linux/drivers/dma/lgm/lgm-dma.c (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Lightning Mountain centralized DMA controller driver
4   *
5   * Copyright (c) 2016 - 2020 Intel Corporation.
6   */
7  
8  #include <linux/bitfield.h>
9  #include <linux/clk.h>
10  #include <linux/dma-mapping.h>
11  #include <linux/dmapool.h>
12  #include <linux/err.h>
13  #include <linux/export.h>
14  #include <linux/init.h>
15  #include <linux/interrupt.h>
16  #include <linux/iopoll.h>
17  #include <linux/of_dma.h>
18  #include <linux/of_irq.h>
19  #include <linux/platform_device.h>
20  #include <linux/reset.h>
21  
22  #include "../dmaengine.h"
23  #include "../virt-dma.h"
24  
25  #define DRIVER_NAME			"lgm-dma"
26  
27  #define DMA_ID				0x0008
28  #define DMA_ID_REV			GENMASK(7, 0)
29  #define DMA_ID_PNR			GENMASK(19, 16)
30  #define DMA_ID_CHNR			GENMASK(26, 20)
31  #define DMA_ID_DW_128B			BIT(27)
32  #define DMA_ID_AW_36B			BIT(28)
33  #define DMA_VER32			0x32
34  #define DMA_VER31			0x31
35  #define DMA_VER22			0x0A
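/*
 * Illustrative sketch, not part of the upstream file: how the DMA_ID fields
 * above are typically decoded, mirroring what intel_ldma_probe() does further
 * down.  The function and variable names here are hypothetical.
 */
static inline void ldma_decode_id_example(void __iomem *base)
{
	u32 id = readl(base + DMA_ID);
	u32 rev = FIELD_GET(DMA_ID_REV, id);	/* e.g. DMA_VER32/31/22 */
	u32 ports = FIELD_GET(DMA_ID_PNR, id);	/* number of ports */
	u32 chans = FIELD_GET(DMA_ID_CHNR, id);	/* number of channels */
	bool aw36 = !!(id & DMA_ID_AW_36B);	/* 36-bit addressing capable */
	bool dw128 = !!(id & DMA_ID_DW_128B);	/* 128-bit data path */

	pr_debug("rev %#x ports %u chans %u aw36 %d dw128 %d\n",
		 rev, ports, chans, aw36, dw128);
}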
36  
37  #define DMA_CTRL			0x0010
38  #define DMA_CTRL_RST			BIT(0)
39  #define DMA_CTRL_DSRAM_PATH		BIT(1)
40  #define DMA_CTRL_DBURST_WR		BIT(3)
41  #define DMA_CTRL_VLD_DF_ACK		BIT(4)
42  #define DMA_CTRL_CH_FL			BIT(6)
43  #define DMA_CTRL_DS_FOD			BIT(7)
44  #define DMA_CTRL_DRB			BIT(8)
45  #define DMA_CTRL_ENBE			BIT(9)
46  #define DMA_CTRL_DESC_TMOUT_CNT_V31	GENMASK(27, 16)
47  #define DMA_CTRL_DESC_TMOUT_EN_V31	BIT(30)
48  #define DMA_CTRL_PKTARB			BIT(31)
49  
50  #define DMA_CPOLL			0x0014
51  #define DMA_CPOLL_CNT			GENMASK(15, 4)
52  #define DMA_CPOLL_EN			BIT(31)
53  
54  #define DMA_CS				0x0018
55  #define DMA_CS_MASK			GENMASK(5, 0)
56  
57  #define DMA_CCTRL			0x001C
58  #define DMA_CCTRL_ON			BIT(0)
59  #define DMA_CCTRL_RST			BIT(1)
60  #define DMA_CCTRL_CH_POLL_EN		BIT(2)
61  #define DMA_CCTRL_CH_ABC		BIT(3) /* Adaptive Burst Chop */
62  #define DMA_CDBA_MSB			GENMASK(7, 4)
63  #define DMA_CCTRL_DIR_TX		BIT(8)
64  #define DMA_CCTRL_CLASS			GENMASK(11, 9)
65  #define DMA_CCTRL_CLASSH		GENMASK(19, 18)
66  #define DMA_CCTRL_WR_NP_EN		BIT(21)
67  #define DMA_CCTRL_PDEN			BIT(23)
68  #define DMA_MAX_CLASS			(SZ_32 - 1)
69  
70  #define DMA_CDBA			0x0020
71  #define DMA_CDLEN			0x0024
72  #define DMA_CIS				0x0028
73  #define DMA_CIE				0x002C
74  #define DMA_CI_EOP			BIT(1)
75  #define DMA_CI_DUR			BIT(2)
76  #define DMA_CI_DESCPT			BIT(3)
77  #define DMA_CI_CHOFF			BIT(4)
78  #define DMA_CI_RDERR			BIT(5)
79  #define DMA_CI_ALL							\
80  	(DMA_CI_EOP | DMA_CI_DUR | DMA_CI_DESCPT | DMA_CI_CHOFF | DMA_CI_RDERR)
81  
82  #define DMA_PS				0x0040
83  #define DMA_PCTRL			0x0044
84  #define DMA_PCTRL_RXBL16		BIT(0)
85  #define DMA_PCTRL_TXBL16		BIT(1)
86  #define DMA_PCTRL_RXBL			GENMASK(3, 2)
87  #define DMA_PCTRL_RXBL_8		3
88  #define DMA_PCTRL_TXBL			GENMASK(5, 4)
89  #define DMA_PCTRL_TXBL_8		3
90  #define DMA_PCTRL_PDEN			BIT(6)
91  #define DMA_PCTRL_RXBL32		BIT(7)
92  #define DMA_PCTRL_RXENDI		GENMASK(9, 8)
93  #define DMA_PCTRL_TXENDI		GENMASK(11, 10)
94  #define DMA_PCTRL_TXBL32		BIT(15)
95  #define DMA_PCTRL_MEM_FLUSH		BIT(16)
96  
97  #define DMA_IRNEN1			0x00E8
98  #define DMA_IRNCR1			0x00EC
99  #define DMA_IRNEN			0x00F4
100  #define DMA_IRNCR			0x00F8
101  #define DMA_C_DP_TICK			0x100
102  #define DMA_C_DP_TICK_TIKNARB		GENMASK(15, 0)
103  #define DMA_C_DP_TICK_TIKARB		GENMASK(31, 16)
104  
105  #define DMA_C_HDRM			0x110
106  /*
107   * If header mode is set in the DMA descriptor:
108   *   If bit 30 is disabled, HDR_LEN must be configured according to the
109   *     channel requirement.
110   *   If bit 30 is enabled (checksum with header mode), HDR_LEN does not
111   *     need to be configured; it enables checksumming for the switch.
112   * If header mode is not set in the DMA descriptor,
113   *   this register setting doesn't matter.
114   */
115  #define DMA_C_HDRM_HDR_SUM		BIT(30)
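/*
 * Illustrative sketch, not part of the upstream file: the two ways DMA_C_HDRM
 * is programmed per the comment above, matching ldma_chan_hdr_mode_cfg()
 * below.  'base' is a hypothetical mapped register base and the channel is
 * assumed to be already selected via DMA_CS.
 */
static inline void ldma_hdrm_example(void __iomem *base, bool csum, u32 hdr_len)
{
	if (csum)
		/* Checksum with header mode: HDR_LEN need not be configured */
		writel(DMA_C_HDRM_HDR_SUM, base + DMA_C_HDRM);
	else
		/* Plain header mode: program the channel's header length */
		writel(hdr_len, base + DMA_C_HDRM);
}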
116  
117  #define DMA_C_BOFF			0x120
118  #define DMA_C_BOFF_BOF_LEN		GENMASK(7, 0)
119  #define DMA_C_BOFF_EN			BIT(31)
120  
121  #define DMA_ORRC			0x190
122  #define DMA_ORRC_ORRCNT			GENMASK(8, 4)
123  #define DMA_ORRC_EN			BIT(31)
124  
125  #define DMA_C_ENDIAN			0x200
126  #define DMA_C_END_DATAENDI		GENMASK(1, 0)
127  #define DMA_C_END_DE_EN			BIT(7)
128  #define DMA_C_END_DESENDI		GENMASK(9, 8)
129  #define DMA_C_END_DES_EN		BIT(16)
130  
131  /* DMA controller capability */
132  #define DMA_ADDR_36BIT			BIT(0)
133  #define DMA_DATA_128BIT			BIT(1)
134  #define DMA_CHAN_FLOW_CTL		BIT(2)
135  #define DMA_DESC_FOD			BIT(3)
136  #define DMA_DESC_IN_SRAM		BIT(4)
137  #define DMA_EN_BYTE_EN			BIT(5)
138  #define DMA_DBURST_WR			BIT(6)
139  #define DMA_VALID_DESC_FETCH_ACK	BIT(7)
140  #define DMA_DFT_DRB			BIT(8)
141  
142  #define DMA_ORRC_MAX_CNT		(SZ_32 - 1)
143  #define DMA_DFT_POLL_CNT		SZ_4
144  #define DMA_DFT_BURST_V22		SZ_2
145  #define DMA_BURSTL_8DW			SZ_8
146  #define DMA_BURSTL_16DW			SZ_16
147  #define DMA_BURSTL_32DW			SZ_32
148  #define DMA_DFT_BURST			DMA_BURSTL_16DW
149  #define DMA_MAX_DESC_NUM		(SZ_8K - 1)
150  #define DMA_CHAN_BOFF_MAX		(SZ_256 - 1)
151  #define DMA_DFT_ENDIAN			0
152  
153  #define DMA_DFT_DESC_TCNT		50
154  #define DMA_HDR_LEN_MAX			(SZ_16K - 1)
155  
156  /* DMA flags */
157  #define DMA_TX_CH			BIT(0)
158  #define DMA_RX_CH			BIT(1)
159  #define DEVICE_ALLOC_DESC		BIT(2)
160  #define CHAN_IN_USE			BIT(3)
161  #define DMA_HW_DESC			BIT(4)
162  
163  /* Descriptor fields */
164  #define DESC_DATA_LEN			GENMASK(15, 0)
165  #define DESC_BYTE_OFF			GENMASK(25, 23)
166  #define DESC_EOP			BIT(28)
167  #define DESC_SOP			BIT(29)
168  #define DESC_C				BIT(30)
169  #define DESC_OWN			BIT(31)
170  
171  #define DMA_CHAN_RST			1
172  #define DMA_MAX_SIZE			(BIT(16) - 1)
173  #define MAX_LOWER_CHANS			32
174  #define MASK_LOWER_CHANS		GENMASK(4, 0)
175  #define DMA_OWN				1
176  #define HIGH_4_BITS			GENMASK(3, 0)
177  #define DMA_DFT_DESC_NUM		1
178  #define DMA_PKT_DROP_DIS		0
179  
180  enum ldma_chan_on_off {
181  	DMA_CH_OFF = 0,
182  	DMA_CH_ON = 1,
183  };
184  
185  enum {
186  	DMA_TYPE_TX = 0,
187  	DMA_TYPE_RX,
188  	DMA_TYPE_MCPY,
189  };
190  
191  struct ldma_dev;
192  struct ldma_port;
193  
194  struct ldma_chan {
195  	struct virt_dma_chan	vchan;
196  	struct ldma_port	*port; /* back pointer */
197  	char			name[8]; /* Channel name */
198  	int			nr; /* Channel id in hardware */
199  	u32			flags; /* central way or channel based way */
200  	enum ldma_chan_on_off	onoff;
201  	dma_addr_t		desc_phys;
202  	void			*desc_base; /* Virtual address */
203  	u32			desc_cnt; /* Number of descriptors */
204  	int			rst;
205  	u32			hdrm_len;
206  	bool			hdrm_csum;
207  	u32			boff_len;
208  	u32			data_endian;
209  	u32			desc_endian;
210  	bool			pden;
211  	bool			desc_rx_np;
212  	bool			data_endian_en;
213  	bool			desc_endian_en;
214  	bool			abc_en;
215  	bool			desc_init;
216  	struct dma_pool		*desc_pool; /* Descriptors pool */
217  	u32			desc_num;
218  	struct dw2_desc_sw	*ds;
219  	struct work_struct	work;
220  	struct dma_slave_config config;
221  };
222  
223  struct ldma_port {
224  	struct ldma_dev		*ldev; /* back pointer */
225  	u32			portid;
226  	u32			rxbl;
227  	u32			txbl;
228  	u32			rxendi;
229  	u32			txendi;
230  	u32			pkt_drop;
231  };
232  
233  /* Instance specific data */
234  struct ldma_inst_data {
235  	bool			desc_in_sram;
236  	bool			chan_fc;
237  	bool			desc_fod; /* Fetch On Demand */
238  	bool			valid_desc_fetch_ack;
239  	u32			orrc; /* Outstanding read count */
240  	const char		*name;
241  	u32			type;
242  };
243  
244  struct ldma_dev {
245  	struct device		*dev;
246  	void __iomem		*base;
247  	struct reset_control	*rst;
248  	struct clk		*core_clk;
249  	struct dma_device	dma_dev;
250  	u32			ver;
251  	int			irq;
252  	struct ldma_port	*ports;
253  	struct ldma_chan	*chans; /* channel list on this DMA or port */
254  	spinlock_t		dev_lock; /* Controller register exclusive */
255  	u32			chan_nrs;
256  	u32			port_nrs;
257  	u32			channels_mask;
258  	u32			flags;
259  	u32			pollcnt;
260  	const struct ldma_inst_data *inst;
261  	struct workqueue_struct	*wq;
262  };
263  
264  struct dw2_desc {
265  	u32 field;
266  	u32 addr;
267  } __packed __aligned(8);
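/*
 * Editorial illustration, not part of the upstream file: each 2DW hardware
 * descriptor is one 32-bit control word plus a 32-bit buffer address, with
 * the DESC_* fields above packed into 'field'.  For a single-segment
 * transfer, ldma_prep_slave_sg() effectively ends up with:
 *
 *	hw_ds->addr  = (u32)dma_addr;
 *	hw_ds->field = FIELD_PREP(DESC_SOP, 1) | FIELD_PREP(DESC_EOP, 1) |
 *		       FIELD_PREP(DESC_DATA_LEN, len) |
 *		       FIELD_PREP(DESC_BYTE_OFF, dma_addr & 0x3) |
 *		       FIELD_PREP(DESC_OWN, DMA_OWN);
 */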
268  
269  struct dw2_desc_sw {
270  	struct virt_dma_desc	vdesc;
271  	struct ldma_chan	*chan;
272  	dma_addr_t		desc_phys;
273  	size_t			desc_cnt;
274  	size_t			size;
275  	struct dw2_desc		*desc_hw;
276  };
277  
278  static inline void
279  ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
280  {
281  	u32 old_val, new_val;
282  
283  	old_val = readl(d->base +  ofs);
284  	new_val = (old_val & ~mask) | (val & mask);
285  
286  	if (new_val != old_val)
287  		writel(new_val, d->base + ofs);
288  }
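/*
 * Editorial note, not part of the upstream file: the per-channel registers
 * (DMA_CCTRL, DMA_CDBA, DMA_CIE, ...) are banked behind the channel select
 * register, so accesses throughout this driver follow a "select channel,
 * then read-modify-write" pattern under d->dev_lock, e.g. as in
 * ldma_chan_on():
 *
 *	spin_lock_irqsave(&d->dev_lock, flags);
 *	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
 *	ldma_update_bits(d, DMA_CCTRL_ON, DMA_CCTRL_ON, DMA_CCTRL);
 *	spin_unlock_irqrestore(&d->dev_lock, flags);
 */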
289  
290  static inline struct ldma_chan *to_ldma_chan(struct dma_chan *chan)
291  {
292  	return container_of(chan, struct ldma_chan, vchan.chan);
293  }
294  
295  static inline struct ldma_dev *to_ldma_dev(struct dma_device *dma_dev)
296  {
297  	return container_of(dma_dev, struct ldma_dev, dma_dev);
298  }
299  
300  static inline struct dw2_desc_sw *to_lgm_dma_desc(struct virt_dma_desc *vdesc)
301  {
302  	return container_of(vdesc, struct dw2_desc_sw, vdesc);
303  }
304  
305  static inline bool ldma_chan_tx(struct ldma_chan *c)
306  {
307  	return !!(c->flags & DMA_TX_CH);
308  }
309  
310  static inline bool ldma_chan_is_hw_desc(struct ldma_chan *c)
311  {
312  	return !!(c->flags & DMA_HW_DESC);
313  }
314  
315  static void ldma_dev_reset(struct ldma_dev *d)
316  
317  {
318  	unsigned long flags;
319  
320  	spin_lock_irqsave(&d->dev_lock, flags);
321  	ldma_update_bits(d, DMA_CTRL_RST, DMA_CTRL_RST, DMA_CTRL);
322  	spin_unlock_irqrestore(&d->dev_lock, flags);
323  }
324  
325  static void ldma_dev_pkt_arb_cfg(struct ldma_dev *d, bool enable)
326  {
327  	unsigned long flags;
328  	u32 mask = DMA_CTRL_PKTARB;
329  	u32 val = enable ? DMA_CTRL_PKTARB : 0;
330  
331  	spin_lock_irqsave(&d->dev_lock, flags);
332  	ldma_update_bits(d, mask, val, DMA_CTRL);
333  	spin_unlock_irqrestore(&d->dev_lock, flags);
334  }
335  
336  static void ldma_dev_sram_desc_cfg(struct ldma_dev *d, bool enable)
337  {
338  	unsigned long flags;
339  	u32 mask = DMA_CTRL_DSRAM_PATH;
340  	u32 val = enable ? DMA_CTRL_DSRAM_PATH : 0;
341  
342  	spin_lock_irqsave(&d->dev_lock, flags);
343  	ldma_update_bits(d, mask, val, DMA_CTRL);
344  	spin_unlock_irqrestore(&d->dev_lock, flags);
345  }
346  
347  static void ldma_dev_chan_flow_ctl_cfg(struct ldma_dev *d, bool enable)
348  {
349  	unsigned long flags;
350  	u32 mask, val;
351  
352  	if (d->inst->type != DMA_TYPE_TX)
353  		return;
354  
355  	mask = DMA_CTRL_CH_FL;
356  	val = enable ? DMA_CTRL_CH_FL : 0;
357  
358  	spin_lock_irqsave(&d->dev_lock, flags);
359  	ldma_update_bits(d, mask, val, DMA_CTRL);
360  	spin_unlock_irqrestore(&d->dev_lock, flags);
361  }
362  
363  static void ldma_dev_global_polling_enable(struct ldma_dev *d)
364  {
365  	unsigned long flags;
366  	u32 mask = DMA_CPOLL_EN | DMA_CPOLL_CNT;
367  	u32 val = DMA_CPOLL_EN;
368  
369  	val |= FIELD_PREP(DMA_CPOLL_CNT, d->pollcnt);
370  
371  	spin_lock_irqsave(&d->dev_lock, flags);
372  	ldma_update_bits(d, mask, val, DMA_CPOLL);
373  	spin_unlock_irqrestore(&d->dev_lock, flags);
374  }
375  
376  static void ldma_dev_desc_fetch_on_demand_cfg(struct ldma_dev *d, bool enable)
377  {
378  	unsigned long flags;
379  	u32 mask, val;
380  
381  	if (d->inst->type == DMA_TYPE_MCPY)
382  		return;
383  
384  	mask = DMA_CTRL_DS_FOD;
385  	val = enable ? DMA_CTRL_DS_FOD : 0;
386  
387  	spin_lock_irqsave(&d->dev_lock, flags);
388  	ldma_update_bits(d, mask, val, DMA_CTRL);
389  	spin_unlock_irqrestore(&d->dev_lock, flags);
390  }
391  
392  static void ldma_dev_byte_enable_cfg(struct ldma_dev *d, bool enable)
393  {
394  	unsigned long flags;
395  	u32 mask = DMA_CTRL_ENBE;
396  	u32 val = enable ? DMA_CTRL_ENBE : 0;
397  
398  	spin_lock_irqsave(&d->dev_lock, flags);
399  	ldma_update_bits(d, mask, val, DMA_CTRL);
400  	spin_unlock_irqrestore(&d->dev_lock, flags);
401  }
402  
403  static void ldma_dev_orrc_cfg(struct ldma_dev *d)
404  {
405  	unsigned long flags;
406  	u32 val = 0;
407  	u32 mask;
408  
409  	if (d->inst->type == DMA_TYPE_RX)
410  		return;
411  
412  	mask = DMA_ORRC_EN | DMA_ORRC_ORRCNT;
413  	if (d->inst->orrc > 0 && d->inst->orrc <= DMA_ORRC_MAX_CNT)
414  		val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->inst->orrc);
415  
416  	spin_lock_irqsave(&d->dev_lock, flags);
417  	ldma_update_bits(d, mask, val, DMA_ORRC);
418  	spin_unlock_irqrestore(&d->dev_lock, flags);
419  }
420  
421  static void ldma_dev_df_tout_cfg(struct ldma_dev *d, bool enable, int tcnt)
422  {
423  	u32 mask = DMA_CTRL_DESC_TMOUT_CNT_V31;
424  	unsigned long flags;
425  	u32 val;
426  
427  	if (enable)
428  		val = DMA_CTRL_DESC_TMOUT_EN_V31 | FIELD_PREP(DMA_CTRL_DESC_TMOUT_CNT_V31, tcnt);
429  	else
430  		val = 0;
431  
432  	spin_lock_irqsave(&d->dev_lock, flags);
433  	ldma_update_bits(d, mask, val, DMA_CTRL);
434  	spin_unlock_irqrestore(&d->dev_lock, flags);
435  }
436  
437  static void ldma_dev_dburst_wr_cfg(struct ldma_dev *d, bool enable)
438  {
439  	unsigned long flags;
440  	u32 mask, val;
441  
442  	if (d->inst->type != DMA_TYPE_RX && d->inst->type != DMA_TYPE_MCPY)
443  		return;
444  
445  	mask = DMA_CTRL_DBURST_WR;
446  	val = enable ? DMA_CTRL_DBURST_WR : 0;
447  
448  	spin_lock_irqsave(&d->dev_lock, flags);
449  	ldma_update_bits(d, mask, val, DMA_CTRL);
450  	spin_unlock_irqrestore(&d->dev_lock, flags);
451  }
452  
453  static void ldma_dev_vld_fetch_ack_cfg(struct ldma_dev *d, bool enable)
454  {
455  	unsigned long flags;
456  	u32 mask, val;
457  
458  	if (d->inst->type != DMA_TYPE_TX)
459  		return;
460  
461  	mask = DMA_CTRL_VLD_DF_ACK;
462  	val = enable ? DMA_CTRL_VLD_DF_ACK : 0;
463  
464  	spin_lock_irqsave(&d->dev_lock, flags);
465  	ldma_update_bits(d, mask, val, DMA_CTRL);
466  	spin_unlock_irqrestore(&d->dev_lock, flags);
467  }
468  
469  static void ldma_dev_drb_cfg(struct ldma_dev *d, int enable)
470  {
471  	unsigned long flags;
472  	u32 mask = DMA_CTRL_DRB;
473  	u32 val = enable ? DMA_CTRL_DRB : 0;
474  
475  	spin_lock_irqsave(&d->dev_lock, flags);
476  	ldma_update_bits(d, mask, val, DMA_CTRL);
477  	spin_unlock_irqrestore(&d->dev_lock, flags);
478  }
479  
480  static int ldma_dev_cfg(struct ldma_dev *d)
481  {
482  	bool enable;
483  
484  	ldma_dev_pkt_arb_cfg(d, true);
485  	ldma_dev_global_polling_enable(d);
486  
487  	enable = !!(d->flags & DMA_DFT_DRB);
488  	ldma_dev_drb_cfg(d, enable);
489  
490  	enable = !!(d->flags & DMA_EN_BYTE_EN);
491  	ldma_dev_byte_enable_cfg(d, enable);
492  
493  	enable = !!(d->flags & DMA_CHAN_FLOW_CTL);
494  	ldma_dev_chan_flow_ctl_cfg(d, enable);
495  
496  	enable = !!(d->flags & DMA_DESC_FOD);
497  	ldma_dev_desc_fetch_on_demand_cfg(d, enable);
498  
499  	enable = !!(d->flags & DMA_DESC_IN_SRAM);
500  	ldma_dev_sram_desc_cfg(d, enable);
501  
502  	enable = !!(d->flags & DMA_DBURST_WR);
503  	ldma_dev_dburst_wr_cfg(d, enable);
504  
505  	enable = !!(d->flags & DMA_VALID_DESC_FETCH_ACK);
506  	ldma_dev_vld_fetch_ack_cfg(d, enable);
507  
508  	if (d->ver > DMA_VER22) {
509  		ldma_dev_orrc_cfg(d);
510  		ldma_dev_df_tout_cfg(d, true, DMA_DFT_DESC_TCNT);
511  	}
512  
513  	dev_dbg(d->dev, "%s Controller 0x%08x configuration done\n",
514  		d->inst->name, readl(d->base + DMA_CTRL));
515  
516  	return 0;
517  }
518  
519  static int ldma_chan_cctrl_cfg(struct ldma_chan *c, u32 val)
520  {
521  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
522  	u32 class_low, class_high;
523  	unsigned long flags;
524  	u32 reg;
525  
526  	spin_lock_irqsave(&d->dev_lock, flags);
527  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
528  	reg = readl(d->base + DMA_CCTRL);
529  	/* Read from hardware */
530  	if (reg & DMA_CCTRL_DIR_TX)
531  		c->flags |= DMA_TX_CH;
532  	else
533  		c->flags |= DMA_RX_CH;
534  
535  	/* Keep the class value unchanged */
536  	class_low = FIELD_GET(DMA_CCTRL_CLASS, reg);
537  	class_high = FIELD_GET(DMA_CCTRL_CLASSH, reg);
538  	val &= ~DMA_CCTRL_CLASS;
539  	val |= FIELD_PREP(DMA_CCTRL_CLASS, class_low);
540  	val &= ~DMA_CCTRL_CLASSH;
541  	val |= FIELD_PREP(DMA_CCTRL_CLASSH, class_high);
542  	writel(val, d->base + DMA_CCTRL);
543  	spin_unlock_irqrestore(&d->dev_lock, flags);
544  
545  	return 0;
546  }
547  
548  static void ldma_chan_irq_init(struct ldma_chan *c)
549  {
550  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
551  	unsigned long flags;
552  	u32 enofs, crofs;
553  	u32 cn_bit;
554  
555  	if (c->nr < MAX_LOWER_CHANS) {
556  		enofs = DMA_IRNEN;
557  		crofs = DMA_IRNCR;
558  	} else {
559  		enofs = DMA_IRNEN1;
560  		crofs = DMA_IRNCR1;
561  	}
562  
563  	cn_bit = BIT(c->nr & MASK_LOWER_CHANS);
564  	spin_lock_irqsave(&d->dev_lock, flags);
565  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
566  
567  	/* Clear all interrupts and disable them */
568  	writel(0, d->base + DMA_CIE);
569  	writel(DMA_CI_ALL, d->base + DMA_CIS);
570  
571  	ldma_update_bits(d, cn_bit, 0, enofs);
572  	writel(cn_bit, d->base + crofs);
573  	spin_unlock_irqrestore(&d->dev_lock, flags);
574  }
575  
576  static void ldma_chan_set_class(struct ldma_chan *c, u32 val)
577  {
578  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
579  	u32 class_val;
580  
581  	if (d->inst->type == DMA_TYPE_MCPY || val > DMA_MAX_CLASS)
582  		return;
583  
584  	/* 3 bits low */
585  	class_val = FIELD_PREP(DMA_CCTRL_CLASS, val & 0x7);
586  	/* 2 bits high */
587  	class_val |= FIELD_PREP(DMA_CCTRL_CLASSH, (val >> 3) & 0x3);
588  
589  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
590  	ldma_update_bits(d, DMA_CCTRL_CLASS | DMA_CCTRL_CLASSH, class_val,
591  			 DMA_CCTRL);
592  }
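/*
 * Worked example, not part of the upstream file: for val = 25 (0b11001),
 * the low 3 bits go into DMA_CCTRL_CLASS and the high 2 bits into
 * DMA_CCTRL_CLASSH:
 *
 *	class_val = FIELD_PREP(DMA_CCTRL_CLASS, 25 & 0x7)	   // low = 1
 *		  | FIELD_PREP(DMA_CCTRL_CLASSH, (25 >> 3) & 0x3); // high = 3
 */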
593  
594  static int ldma_chan_on(struct ldma_chan *c)
595  {
596  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
597  	unsigned long flags;
598  
599  	/* If descriptors are not configured, don't allow turning on the channel */
600  	if (WARN_ON(!c->desc_init))
601  		return -EINVAL;
602  
603  	spin_lock_irqsave(&d->dev_lock, flags);
604  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
605  	ldma_update_bits(d, DMA_CCTRL_ON, DMA_CCTRL_ON, DMA_CCTRL);
606  	spin_unlock_irqrestore(&d->dev_lock, flags);
607  
608  	c->onoff = DMA_CH_ON;
609  
610  	return 0;
611  }
612  
613  static int ldma_chan_off(struct ldma_chan *c)
614  {
615  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
616  	unsigned long flags;
617  	u32 val;
618  	int ret;
619  
620  	spin_lock_irqsave(&d->dev_lock, flags);
621  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
622  	ldma_update_bits(d, DMA_CCTRL_ON, 0, DMA_CCTRL);
623  	spin_unlock_irqrestore(&d->dev_lock, flags);
624  
625  	ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
626  					!(val & DMA_CCTRL_ON), 0, 10000);
627  	if (ret)
628  		return ret;
629  
630  	c->onoff = DMA_CH_OFF;
631  
632  	return 0;
633  }
634  
635  static void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
636  				  int desc_num)
637  {
638  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
639  	unsigned long flags;
640  
641  	spin_lock_irqsave(&d->dev_lock, flags);
642  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
643  	writel(lower_32_bits(desc_base), d->base + DMA_CDBA);
644  
645  	/* Upper 4 bits of 36-bit addressing */
646  	if (IS_ENABLED(CONFIG_64BIT)) {
647  		u32 hi = upper_32_bits(desc_base) & HIGH_4_BITS;
648  
649  		ldma_update_bits(d, DMA_CDBA_MSB,
650  				 FIELD_PREP(DMA_CDBA_MSB, hi), DMA_CCTRL);
651  	}
652  	writel(desc_num, d->base + DMA_CDLEN);
653  	spin_unlock_irqrestore(&d->dev_lock, flags);
654  
655  	c->desc_init = true;
656  }
657  
658  static struct dma_async_tx_descriptor *
659  ldma_chan_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base, int desc_num)
660  {
661  	struct ldma_chan *c = to_ldma_chan(chan);
662  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
663  	struct dma_async_tx_descriptor *tx;
664  	struct dw2_desc_sw *ds;
665  
666  	if (!desc_num) {
667  		dev_err(d->dev, "Channel %d must allocate descriptor first\n",
668  			c->nr);
669  		return NULL;
670  	}
671  
672  	if (desc_num > DMA_MAX_DESC_NUM) {
673  		dev_err(d->dev, "Channel %d descriptor number out of range %d\n",
674  			c->nr, desc_num);
675  		return NULL;
676  	}
677  
678  	ldma_chan_desc_hw_cfg(c, desc_base, desc_num);
679  
680  	c->flags |= DMA_HW_DESC;
681  	c->desc_cnt = desc_num;
682  	c->desc_phys = desc_base;
683  
684  	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
685  	if (!ds)
686  		return NULL;
687  
688  	tx = &ds->vdesc.tx;
689  	dma_async_tx_descriptor_init(tx, chan);
690  
691  	return tx;
692  }
693  
694  static int ldma_chan_reset(struct ldma_chan *c)
695  {
696  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
697  	unsigned long flags;
698  	u32 val;
699  	int ret;
700  
701  	ret = ldma_chan_off(c);
702  	if (ret)
703  		return ret;
704  
705  	spin_lock_irqsave(&d->dev_lock, flags);
706  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
707  	ldma_update_bits(d, DMA_CCTRL_RST, DMA_CCTRL_RST, DMA_CCTRL);
708  	spin_unlock_irqrestore(&d->dev_lock, flags);
709  
710  	ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
711  					!(val & DMA_CCTRL_RST), 0, 10000);
712  	if (ret)
713  		return ret;
714  
715  	c->rst = 1;
716  	c->desc_init = false;
717  
718  	return 0;
719  }
720  
721  static void ldma_chan_byte_offset_cfg(struct ldma_chan *c, u32 boff_len)
722  {
723  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
724  	u32 mask = DMA_C_BOFF_EN | DMA_C_BOFF_BOF_LEN;
725  	u32 val;
726  
727  	if (boff_len > 0 && boff_len <= DMA_CHAN_BOFF_MAX)
728  		val = FIELD_PREP(DMA_C_BOFF_BOF_LEN, boff_len) | DMA_C_BOFF_EN;
729  	else
730  		val = 0;
731  
732  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
733  	ldma_update_bits(d, mask, val, DMA_C_BOFF);
734  }
735  
736  static void ldma_chan_data_endian_cfg(struct ldma_chan *c, bool enable,
737  				      u32 endian_type)
738  {
739  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
740  	u32 mask = DMA_C_END_DE_EN | DMA_C_END_DATAENDI;
741  	u32 val;
742  
743  	if (enable)
744  		val = DMA_C_END_DE_EN | FIELD_PREP(DMA_C_END_DATAENDI, endian_type);
745  	else
746  		val = 0;
747  
748  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
749  	ldma_update_bits(d, mask, val, DMA_C_ENDIAN);
750  }
751  
752  static void ldma_chan_desc_endian_cfg(struct ldma_chan *c, bool enable,
753  				      u32 endian_type)
754  {
755  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
756  	u32 mask = DMA_C_END_DES_EN | DMA_C_END_DESENDI;
757  	u32 val;
758  
759  	if (enable)
760  		val = DMA_C_END_DES_EN | FIELD_PREP(DMA_C_END_DESENDI, endian_type);
761  	else
762  		val = 0;
763  
764  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
765  	ldma_update_bits(d, mask, val, DMA_C_ENDIAN);
766  }
767  
768  static void ldma_chan_hdr_mode_cfg(struct ldma_chan *c, u32 hdr_len, bool csum)
769  {
770  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
771  	u32 mask, val;
772  
773  	/* NB: with checksum disabled, a valid header length must be provided */
774  	if (!csum && (!hdr_len || hdr_len > DMA_HDR_LEN_MAX))
775  		return;
776  
777  	mask = DMA_C_HDRM_HDR_SUM;
778  	val = DMA_C_HDRM_HDR_SUM;
779  
780  	if (!csum && hdr_len)
781  		val = hdr_len;
782  
783  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
784  	ldma_update_bits(d, mask, val, DMA_C_HDRM);
785  }
786  
787  static void ldma_chan_rxwr_np_cfg(struct ldma_chan *c, bool enable)
788  {
789  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
790  	u32 mask, val;
791  
792  	/* Only valid for RX channel */
793  	if (ldma_chan_tx(c))
794  		return;
795  
796  	mask = DMA_CCTRL_WR_NP_EN;
797  	val = enable ? DMA_CCTRL_WR_NP_EN : 0;
798  
799  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
800  	ldma_update_bits(d, mask, val, DMA_CCTRL);
801  }
802  
803  static void ldma_chan_abc_cfg(struct ldma_chan *c, bool enable)
804  {
805  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
806  	u32 mask, val;
807  
808  	if (d->ver < DMA_VER32 || ldma_chan_tx(c))
809  		return;
810  
811  	mask = DMA_CCTRL_CH_ABC;
812  	val = enable ? DMA_CCTRL_CH_ABC : 0;
813  
814  	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
815  	ldma_update_bits(d, mask, val, DMA_CCTRL);
816  }
817  
818  static int ldma_port_cfg(struct ldma_port *p)
819  {
820  	unsigned long flags;
821  	struct ldma_dev *d;
822  	u32 reg;
823  
824  	d = p->ldev;
825  	reg = FIELD_PREP(DMA_PCTRL_TXENDI, p->txendi);
826  	reg |= FIELD_PREP(DMA_PCTRL_RXENDI, p->rxendi);
827  
828  	if (d->ver == DMA_VER22) {
829  		reg |= FIELD_PREP(DMA_PCTRL_TXBL, p->txbl);
830  		reg |= FIELD_PREP(DMA_PCTRL_RXBL, p->rxbl);
831  	} else {
832  		reg |= FIELD_PREP(DMA_PCTRL_PDEN, p->pkt_drop);
833  
834  		if (p->txbl == DMA_BURSTL_32DW)
835  			reg |= DMA_PCTRL_TXBL32;
836  		else if (p->txbl == DMA_BURSTL_16DW)
837  			reg |= DMA_PCTRL_TXBL16;
838  		else
839  			reg |= FIELD_PREP(DMA_PCTRL_TXBL, DMA_PCTRL_TXBL_8);
840  
841  		if (p->rxbl == DMA_BURSTL_32DW)
842  			reg |= DMA_PCTRL_RXBL32;
843  		else if (p->rxbl == DMA_BURSTL_16DW)
844  			reg |= DMA_PCTRL_RXBL16;
845  		else
846  			reg |= FIELD_PREP(DMA_PCTRL_RXBL, DMA_PCTRL_RXBL_8);
847  	}
848  
849  	spin_lock_irqsave(&d->dev_lock, flags);
850  	writel(p->portid, d->base + DMA_PS);
851  	writel(reg, d->base + DMA_PCTRL);
852  	spin_unlock_irqrestore(&d->dev_lock, flags);
853  
854  	reg = readl(d->base + DMA_PCTRL); /* read back */
855  	dev_dbg(d->dev, "Port Control 0x%08x configuration done\n", reg);
856  
857  	return 0;
858  }
859  
860  static int ldma_chan_cfg(struct ldma_chan *c)
861  {
862  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
863  	unsigned long flags;
864  	u32 reg;
865  
866  	reg = c->pden ? DMA_CCTRL_PDEN : 0;
867  	reg |= c->onoff ? DMA_CCTRL_ON : 0;
868  	reg |= c->rst ? DMA_CCTRL_RST : 0;
869  
870  	ldma_chan_cctrl_cfg(c, reg);
871  	ldma_chan_irq_init(c);
872  
873  	if (d->ver <= DMA_VER22)
874  		return 0;
875  
876  	spin_lock_irqsave(&d->dev_lock, flags);
877  	ldma_chan_set_class(c, c->nr);
878  	ldma_chan_byte_offset_cfg(c, c->boff_len);
879  	ldma_chan_data_endian_cfg(c, c->data_endian_en, c->data_endian);
880  	ldma_chan_desc_endian_cfg(c, c->desc_endian_en, c->desc_endian);
881  	ldma_chan_hdr_mode_cfg(c, c->hdrm_len, c->hdrm_csum);
882  	ldma_chan_rxwr_np_cfg(c, c->desc_rx_np);
883  	ldma_chan_abc_cfg(c, c->abc_en);
884  	spin_unlock_irqrestore(&d->dev_lock, flags);
885  
886  	if (ldma_chan_is_hw_desc(c))
887  		ldma_chan_desc_hw_cfg(c, c->desc_phys, c->desc_cnt);
888  
889  	return 0;
890  }
891  
892  static void ldma_dev_init(struct ldma_dev *d)
893  {
894  	unsigned long ch_mask = (unsigned long)d->channels_mask;
895  	struct ldma_port *p;
896  	struct ldma_chan *c;
897  	int i;
898  	u32 j;
899  
900  	spin_lock_init(&d->dev_lock);
901  	ldma_dev_reset(d);
902  	ldma_dev_cfg(d);
903  
904  	/* DMA port initialization */
905  	for (i = 0; i < d->port_nrs; i++) {
906  		p = &d->ports[i];
907  		ldma_port_cfg(p);
908  	}
909  
910  	/* DMA channel initialization */
911  	for_each_set_bit(j, &ch_mask, d->chan_nrs) {
912  		c = &d->chans[j];
913  		ldma_chan_cfg(c);
914  	}
915  }
916  
917  static int ldma_parse_dt(struct ldma_dev *d)
918  {
919  	struct fwnode_handle *fwnode = dev_fwnode(d->dev);
920  	struct ldma_port *p;
921  	int i;
922  
923  	if (fwnode_property_read_bool(fwnode, "intel,dma-byte-en"))
924  		d->flags |= DMA_EN_BYTE_EN;
925  
926  	if (fwnode_property_read_bool(fwnode, "intel,dma-dburst-wr"))
927  		d->flags |= DMA_DBURST_WR;
928  
929  	if (fwnode_property_read_bool(fwnode, "intel,dma-drb"))
930  		d->flags |= DMA_DFT_DRB;
931  
932  	if (fwnode_property_read_u32(fwnode, "intel,dma-poll-cnt",
933  				     &d->pollcnt))
934  		d->pollcnt = DMA_DFT_POLL_CNT;
935  
936  	if (d->inst->chan_fc)
937  		d->flags |= DMA_CHAN_FLOW_CTL;
938  
939  	if (d->inst->desc_fod)
940  		d->flags |= DMA_DESC_FOD;
941  
942  	if (d->inst->desc_in_sram)
943  		d->flags |= DMA_DESC_IN_SRAM;
944  
945  	if (d->inst->valid_desc_fetch_ack)
946  		d->flags |= DMA_VALID_DESC_FETCH_ACK;
947  
948  	if (d->ver > DMA_VER22) {
949  		if (!d->port_nrs)
950  			return -EINVAL;
951  
952  		for (i = 0; i < d->port_nrs; i++) {
953  			p = &d->ports[i];
954  			p->rxendi = DMA_DFT_ENDIAN;
955  			p->txendi = DMA_DFT_ENDIAN;
956  			p->rxbl = DMA_DFT_BURST;
957  			p->txbl = DMA_DFT_BURST;
958  			p->pkt_drop = DMA_PKT_DROP_DIS;
959  		}
960  	}
961  
962  	return 0;
963  }
964  
965  static void dma_free_desc_resource(struct virt_dma_desc *vdesc)
966  {
967  	struct dw2_desc_sw *ds = to_lgm_dma_desc(vdesc);
968  	struct ldma_chan *c = ds->chan;
969  
970  	dma_pool_free(c->desc_pool, ds->desc_hw, ds->desc_phys);
971  	kfree(ds);
972  }
973  
974  static struct dw2_desc_sw *
975  dma_alloc_desc_resource(int num, struct ldma_chan *c)
976  {
977  	struct device *dev = c->vchan.chan.device->dev;
978  	struct dw2_desc_sw *ds;
979  
980  	if (num > c->desc_num) {
981  		dev_err(dev, "sg num %d exceed max %d\n", num, c->desc_num);
982  		return NULL;
983  	}
984  
985  	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
986  	if (!ds)
987  		return NULL;
988  
989  	ds->chan = c;
990  	ds->desc_hw = dma_pool_zalloc(c->desc_pool, GFP_ATOMIC,
991  				      &ds->desc_phys);
992  	if (!ds->desc_hw) {
993  		dev_dbg(dev, "out of memory for link descriptor\n");
994  		kfree(ds);
995  		return NULL;
996  	}
997  	ds->desc_cnt = num;
998  
999  	return ds;
1000  }
1001  
1002  static void ldma_chan_irq_en(struct ldma_chan *c)
1003  {
1004  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1005  	unsigned long flags;
1006  
1007  	spin_lock_irqsave(&d->dev_lock, flags);
1008  	writel(c->nr, d->base + DMA_CS);
1009  	writel(DMA_CI_EOP, d->base + DMA_CIE);
1010  	writel(BIT(c->nr), d->base + DMA_IRNEN);
1011  	spin_unlock_irqrestore(&d->dev_lock, flags);
1012  }
1013  
1014  static void ldma_issue_pending(struct dma_chan *chan)
1015  {
1016  	struct ldma_chan *c = to_ldma_chan(chan);
1017  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1018  	unsigned long flags;
1019  
1020  	if (d->ver == DMA_VER22) {
1021  		spin_lock_irqsave(&c->vchan.lock, flags);
1022  		if (vchan_issue_pending(&c->vchan)) {
1023  			struct virt_dma_desc *vdesc;
1024  
1025  			/* Get the next descriptor */
1026  			vdesc = vchan_next_desc(&c->vchan);
1027  			if (!vdesc) {
1028  				c->ds = NULL;
1029  				spin_unlock_irqrestore(&c->vchan.lock, flags);
1030  				return;
1031  			}
1032  			list_del(&vdesc->node);
1033  			c->ds = to_lgm_dma_desc(vdesc);
1034  			ldma_chan_desc_hw_cfg(c, c->ds->desc_phys, c->ds->desc_cnt);
1035  			ldma_chan_irq_en(c);
1036  		}
1037  		spin_unlock_irqrestore(&c->vchan.lock, flags);
1038  	}
1039  	ldma_chan_on(c);
1040  }
1041  
1042  static void ldma_synchronize(struct dma_chan *chan)
1043  {
1044  	struct ldma_chan *c = to_ldma_chan(chan);
1045  
1046  	/*
1047  	 * Cancel any pending work. In that case the
1048  	 * descriptor resource needs to be freed here.
1049  	 */
1050  	cancel_work_sync(&c->work);
1051  	vchan_synchronize(&c->vchan);
1052  	if (c->ds)
1053  		dma_free_desc_resource(&c->ds->vdesc);
1054  }
1055  
1056  static int ldma_terminate_all(struct dma_chan *chan)
1057  {
1058  	struct ldma_chan *c = to_ldma_chan(chan);
1059  	unsigned long flags;
1060  	LIST_HEAD(head);
1061  
1062  	spin_lock_irqsave(&c->vchan.lock, flags);
1063  	vchan_get_all_descriptors(&c->vchan, &head);
1064  	spin_unlock_irqrestore(&c->vchan.lock, flags);
1065  	vchan_dma_desc_free_list(&c->vchan, &head);
1066  
1067  	return ldma_chan_reset(c);
1068  }
1069  
1070  static int ldma_resume_chan(struct dma_chan *chan)
1071  {
1072  	struct ldma_chan *c = to_ldma_chan(chan);
1073  
1074  	ldma_chan_on(c);
1075  
1076  	return 0;
1077  }
1078  
1079  static int ldma_pause_chan(struct dma_chan *chan)
1080  {
1081  	struct ldma_chan *c = to_ldma_chan(chan);
1082  
1083  	return ldma_chan_off(c);
1084  }
1085  
1086  static enum dma_status
1087  ldma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1088  	       struct dma_tx_state *txstate)
1089  {
1090  	struct ldma_chan *c = to_ldma_chan(chan);
1091  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1092  	enum dma_status status = DMA_COMPLETE;
1093  
1094  	if (d->ver == DMA_VER22)
1095  		status = dma_cookie_status(chan, cookie, txstate);
1096  
1097  	return status;
1098  }
1099  
1100  static void dma_chan_irq(int irq, void *data)
1101  {
1102  	struct ldma_chan *c = data;
1103  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1104  	u32 stat;
1105  
1106  	/* Disable channel interrupts */
1107  	writel(c->nr, d->base + DMA_CS);
1108  	stat = readl(d->base + DMA_CIS);
1109  	if (!stat)
1110  		return;
1111  
1112  	writel(readl(d->base + DMA_CIE) & ~DMA_CI_ALL, d->base + DMA_CIE);
1113  	writel(stat, d->base + DMA_CIS);
1114  	queue_work(d->wq, &c->work);
1115  }
1116  
1117  static irqreturn_t dma_interrupt(int irq, void *dev_id)
1118  {
1119  	struct ldma_dev *d = dev_id;
1120  	struct ldma_chan *c;
1121  	unsigned long irncr;
1122  	u32 cid;
1123  
1124  	irncr = readl(d->base + DMA_IRNCR);
1125  	if (!irncr) {
1126  		dev_err(d->dev, "dummy interrupt\n");
1127  		return IRQ_NONE;
1128  	}
1129  
1130  	for_each_set_bit(cid, &irncr, d->chan_nrs) {
1131  		/* Mask */
1132  		writel(readl(d->base + DMA_IRNEN) & ~BIT(cid), d->base + DMA_IRNEN);
1133  		/* Ack */
1134  		writel(readl(d->base + DMA_IRNCR) | BIT(cid), d->base + DMA_IRNCR);
1135  
1136  		c = &d->chans[cid];
1137  		dma_chan_irq(irq, c);
1138  	}
1139  
1140  	return IRQ_HANDLED;
1141  }
1142  
1143  static void prep_slave_burst_len(struct ldma_chan *c)
1144  {
1145  	struct ldma_port *p = c->port;
1146  	struct dma_slave_config *cfg = &c->config;
1147  
1148  	if (cfg->dst_maxburst)
1149  		cfg->src_maxburst = cfg->dst_maxburst;
1150  
1151  	/* TX and RX have the same burst length */
1152  	p->txbl = ilog2(cfg->src_maxburst);
1153  	p->rxbl = p->txbl;
1154  }
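/*
 * Worked example, not part of the upstream file: for the DMA_VER22 encoding
 * used here, a client requesting src_maxburst = 8 yields
 *
 *	p->txbl = ilog2(8) = 3;		// matches DMA_PCTRL_TXBL_8
 *	p->rxbl = p->txbl;
 *
 * which ldma_port_cfg() then programs into the 2-bit DMA_PCTRL_TXBL/RXBL
 * fields.
 */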
1155  
1156  static struct dma_async_tx_descriptor *
1157  ldma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1158  		   unsigned int sglen, enum dma_transfer_direction dir,
1159  		   unsigned long flags, void *context)
1160  {
1161  	struct ldma_chan *c = to_ldma_chan(chan);
1162  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1163  	size_t len, avail, total = 0;
1164  	struct dw2_desc *hw_ds;
1165  	struct dw2_desc_sw *ds;
1166  	struct scatterlist *sg;
1167  	int num = sglen, i;
1168  	dma_addr_t addr;
1169  
1170  	if (!sgl)
1171  		return NULL;
1172  
1173  	if (d->ver > DMA_VER22)
1174  		return ldma_chan_desc_cfg(chan, sgl->dma_address, sglen);
1175  
1176  	for_each_sg(sgl, sg, sglen, i) {
1177  		avail = sg_dma_len(sg);
1178  		if (avail > DMA_MAX_SIZE)
1179  			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
1180  	}
1181  
1182  	ds = dma_alloc_desc_resource(num, c);
1183  	if (!ds)
1184  		return NULL;
1185  
1186  	c->ds = ds;
1187  
1188  	num = 0;
1189  	/* SOP and EOP have to be handled carefully */
1190  	for_each_sg(sgl, sg, sglen, i) {
1191  		addr = sg_dma_address(sg);
1192  		avail = sg_dma_len(sg);
1193  		total += avail;
1194  
1195  		do {
1196  			len = min_t(size_t, avail, DMA_MAX_SIZE);
1197  
1198  			hw_ds = &ds->desc_hw[num];
1199  			switch (sglen) {
1200  			case 1:
1201  				hw_ds->field &= ~DESC_SOP;
1202  				hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
1203  
1204  				hw_ds->field &= ~DESC_EOP;
1205  				hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
1206  				break;
1207  			default:
1208  				if (num == 0) {
1209  					hw_ds->field &= ~DESC_SOP;
1210  					hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
1211  
1212  					hw_ds->field &= ~DESC_EOP;
1213  					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
1214  				} else if (num == (sglen - 1)) {
1215  					hw_ds->field &= ~DESC_SOP;
1216  					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
1217  					hw_ds->field &= ~DESC_EOP;
1218  					hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
1219  				} else {
1220  					hw_ds->field &= ~DESC_SOP;
1221  					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
1222  
1223  					hw_ds->field &= ~DESC_EOP;
1224  					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
1225  				}
1226  				break;
1227  			}
1228  			/* Only 32-bit addresses are supported */
1229  			hw_ds->addr = (u32)addr;
1230  
1231  			hw_ds->field &= ~DESC_DATA_LEN;
1232  			hw_ds->field |= FIELD_PREP(DESC_DATA_LEN, len);
1233  
1234  			hw_ds->field &= ~DESC_C;
1235  			hw_ds->field |= FIELD_PREP(DESC_C, 0);
1236  
1237  			hw_ds->field &= ~DESC_BYTE_OFF;
1238  			hw_ds->field |= FIELD_PREP(DESC_BYTE_OFF, addr & 0x3);
1239  
1240  			/* Ensure data ready before ownership change */
1241  			wmb();
1242  			hw_ds->field &= ~DESC_OWN;
1243  			hw_ds->field |= FIELD_PREP(DESC_OWN, DMA_OWN);
1244  
1245  			/* Ensure ownership changed before moving forward */
1246  			wmb();
1247  			num++;
1248  			addr += len;
1249  			avail -= len;
1250  		} while (avail);
1251  	}
1252  
1253  	ds->size = total;
1254  	prep_slave_burst_len(c);
1255  
1256  	return vchan_tx_prep(&c->vchan, &ds->vdesc, DMA_CTRL_ACK);
1257  }
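/*
 * Illustrative client-side sketch, not part of this driver: how a peripheral
 * driver would typically exercise the slave path above through the generic
 * dmaengine API (shown for a DMA_VER22 instance, which provides
 * device_config).  The request name, FIFO address and burst value are
 * hypothetical; completion handling and channel release are omitted.
 */
static int lgm_dma_client_example(struct device *dev, struct scatterlist *sgl,
				  unsigned int sglen, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 8,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");	/* name from the client's DT */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);	/* lands in ldma_slave_config() */

	tx = dmaengine_prep_slave_sg(chan, sgl, sglen, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* lands in ldma_issue_pending() */

	return 0;
}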
1258  
1259  static int
1260  ldma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
1261  {
1262  	struct ldma_chan *c = to_ldma_chan(chan);
1263  
1264  	memcpy(&c->config, cfg, sizeof(c->config));
1265  
1266  	return 0;
1267  }
1268  
1269  static int ldma_alloc_chan_resources(struct dma_chan *chan)
1270  {
1271  	struct ldma_chan *c = to_ldma_chan(chan);
1272  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1273  	struct device *dev = c->vchan.chan.device->dev;
1274  	size_t	desc_sz;
1275  
1276  	if (d->ver > DMA_VER22) {
1277  		c->flags |= CHAN_IN_USE;
1278  		return 0;
1279  	}
1280  
1281  	if (c->desc_pool)
1282  		return c->desc_num;
1283  
1284  	desc_sz = c->desc_num * sizeof(struct dw2_desc);
1285  	c->desc_pool = dma_pool_create(c->name, dev, desc_sz,
1286  				       __alignof__(struct dw2_desc), 0);
1287  
1288  	if (!c->desc_pool) {
1289  		dev_err(dev, "unable to allocate descriptor pool\n");
1290  		return -ENOMEM;
1291  	}
1292  
1293  	return c->desc_num;
1294  }
1295  
1296  static void ldma_free_chan_resources(struct dma_chan *chan)
1297  {
1298  	struct ldma_chan *c = to_ldma_chan(chan);
1299  	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1300  
1301  	if (d->ver == DMA_VER22) {
1302  		dma_pool_destroy(c->desc_pool);
1303  		c->desc_pool = NULL;
1304  		vchan_free_chan_resources(to_virt_chan(chan));
1305  		ldma_chan_reset(c);
1306  	} else {
1307  		c->flags &= ~CHAN_IN_USE;
1308  	}
1309  }
1310  
1311  static void dma_work(struct work_struct *work)
1312  {
1313  	struct ldma_chan *c = container_of(work, struct ldma_chan, work);
1314  	struct dma_async_tx_descriptor *tx = &c->ds->vdesc.tx;
1315  	struct virt_dma_chan *vc = &c->vchan;
1316  	struct dmaengine_desc_callback cb;
1317  	struct virt_dma_desc *vd, *_vd;
1318  	unsigned long flags;
1319  	LIST_HEAD(head);
1320  
1321  	spin_lock_irqsave(&c->vchan.lock, flags);
1322  	list_splice_tail_init(&vc->desc_completed, &head);
1323  	spin_unlock_irqrestore(&c->vchan.lock, flags);
1324  	dmaengine_desc_get_callback(tx, &cb);
1325  	dma_cookie_complete(tx);
1326  	dmaengine_desc_callback_invoke(&cb, NULL);
1327  
1328  	list_for_each_entry_safe(vd, _vd, &head, node) {
1329  		dmaengine_desc_get_callback(tx, &cb);
1330  		dma_cookie_complete(tx);
1331  		list_del(&vd->node);
1332  		dmaengine_desc_callback_invoke(&cb, NULL);
1333  
1334  		vchan_vdesc_fini(vd);
1335  	}
1336  	c->ds = NULL;
1337  }
1338  
1339  static void
1340  update_burst_len_v22(struct ldma_chan *c, struct ldma_port *p, u32 burst)
1341  {
1342  	if (ldma_chan_tx(c))
1343  		p->txbl = ilog2(burst);
1344  	else
1345  		p->rxbl = ilog2(burst);
1346  }
1347  
1348  static void
1349  update_burst_len_v3X(struct ldma_chan *c, struct ldma_port *p, u32 burst)
1350  {
1351  	if (ldma_chan_tx(c))
1352  		p->txbl = burst;
1353  	else
1354  		p->rxbl = burst;
1355  }
1356  
1357  static int
1358  update_client_configs(struct of_dma *ofdma, struct of_phandle_args *spec)
1359  {
1360  	struct ldma_dev *d = ofdma->of_dma_data;
1361  	u32 chan_id =  spec->args[0];
1362  	u32 port_id =  spec->args[1];
1363  	u32 burst = spec->args[2];
1364  	struct ldma_port *p;
1365  	struct ldma_chan *c;
1366  
1367  	if (chan_id >= d->chan_nrs || port_id >= d->port_nrs)
1368  		return 0;
1369  
1370  	p = &d->ports[port_id];
1371  	c = &d->chans[chan_id];
1372  	c->port = p;
1373  
1374  	if (d->ver == DMA_VER22)
1375  		update_burst_len_v22(c, p, burst);
1376  	else
1377  		update_burst_len_v3X(c, p, burst);
1378  
1379  	ldma_port_cfg(p);
1380  
1381  	return 1;
1382  }
1383  
1384  static struct dma_chan *ldma_xlate(struct of_phandle_args *spec,
1385  				   struct of_dma *ofdma)
1386  {
1387  	struct ldma_dev *d = ofdma->of_dma_data;
1388  	u32 chan_id =  spec->args[0];
1389  	int ret;
1390  
1391  	if (!spec->args_count)
1392  		return NULL;
1393  
1394  	/* If args_count is 1, the driver uses default settings */
1395  	if (spec->args_count > 1) {
1396  		ret = update_client_configs(ofdma, spec);
1397  		if (!ret)
1398  			return NULL;
1399  	}
1400  
1401  	return dma_get_slave_channel(&d->chans[chan_id].vchan.chan);
1402  }
1403  
1404  static void ldma_dma_init_v22(int i, struct ldma_dev *d)
1405  {
1406  	struct ldma_chan *c;
1407  
1408  	c = &d->chans[i];
1409  	c->nr = i; /* Real channel number */
1410  	c->rst = DMA_CHAN_RST;
1411  	c->desc_num = DMA_DFT_DESC_NUM;
1412  	snprintf(c->name, sizeof(c->name), "chan%d", c->nr);
1413  	INIT_WORK(&c->work, dma_work);
1414  	c->vchan.desc_free = dma_free_desc_resource;
1415  	vchan_init(&c->vchan, &d->dma_dev);
1416  }
1417  
1418  static void ldma_dma_init_v3X(int i, struct ldma_dev *d)
1419  {
1420  	struct ldma_chan *c;
1421  
1422  	c = &d->chans[i];
1423  	c->data_endian = DMA_DFT_ENDIAN;
1424  	c->desc_endian = DMA_DFT_ENDIAN;
1425  	c->data_endian_en = false;
1426  	c->desc_endian_en = false;
1427  	c->desc_rx_np = false;
1428  	c->flags |= DEVICE_ALLOC_DESC;
1429  	c->onoff = DMA_CH_OFF;
1430  	c->rst = DMA_CHAN_RST;
1431  	c->abc_en = true;
1432  	c->hdrm_csum = false;
1433  	c->boff_len = 0;
1434  	c->nr = i;
1435  	c->vchan.desc_free = dma_free_desc_resource;
1436  	vchan_init(&c->vchan, &d->dma_dev);
1437  }
1438  
1439  static int ldma_init_v22(struct ldma_dev *d, struct platform_device *pdev)
1440  {
1441  	int ret;
1442  
1443  	ret = device_property_read_u32(d->dev, "dma-channels", &d->chan_nrs);
1444  	if (ret < 0) {
1445  		dev_err(d->dev, "unable to read dma-channels property\n");
1446  		return ret;
1447  	}
1448  
1449  	d->irq = platform_get_irq(pdev, 0);
1450  	if (d->irq < 0)
1451  		return d->irq;
1452  
1453  	ret = devm_request_irq(&pdev->dev, d->irq, dma_interrupt, 0,
1454  			       DRIVER_NAME, d);
1455  	if (ret)
1456  		return ret;
1457  
1458  	d->wq = alloc_ordered_workqueue("dma_wq", WQ_MEM_RECLAIM |
1459  			WQ_HIGHPRI);
1460  	if (!d->wq)
1461  		return -ENOMEM;
1462  
1463  	return 0;
1464  }
1465  
1466  static void ldma_clk_disable(void *data)
1467  {
1468  	struct ldma_dev *d = data;
1469  
1470  	clk_disable_unprepare(d->core_clk);
1471  	reset_control_assert(d->rst);
1472  }
1473  
1474  static const struct ldma_inst_data dma0 = {
1475  	.name = "dma0",
1476  	.chan_fc = false,
1477  	.desc_fod = false,
1478  	.desc_in_sram = false,
1479  	.valid_desc_fetch_ack = false,
1480  };
1481  
1482  static const struct ldma_inst_data dma2tx = {
1483  	.name = "dma2tx",
1484  	.type = DMA_TYPE_TX,
1485  	.orrc = 16,
1486  	.chan_fc = true,
1487  	.desc_fod = true,
1488  	.desc_in_sram = true,
1489  	.valid_desc_fetch_ack = true,
1490  };
1491  
1492  static const struct ldma_inst_data dma1rx = {
1493  	.name = "dma1rx",
1494  	.type = DMA_TYPE_RX,
1495  	.orrc = 16,
1496  	.chan_fc = false,
1497  	.desc_fod = true,
1498  	.desc_in_sram = true,
1499  	.valid_desc_fetch_ack = false,
1500  };
1501  
1502  static const struct ldma_inst_data dma1tx = {
1503  	.name = "dma1tx",
1504  	.type = DMA_TYPE_TX,
1505  	.orrc = 16,
1506  	.chan_fc = true,
1507  	.desc_fod = true,
1508  	.desc_in_sram = true,
1509  	.valid_desc_fetch_ack = true,
1510  };
1511  
1512  static const struct ldma_inst_data dma0tx = {
1513  	.name = "dma0tx",
1514  	.type = DMA_TYPE_TX,
1515  	.orrc = 16,
1516  	.chan_fc = true,
1517  	.desc_fod = true,
1518  	.desc_in_sram = true,
1519  	.valid_desc_fetch_ack = true,
1520  };
1521  
1522  static const struct ldma_inst_data dma3 = {
1523  	.name = "dma3",
1524  	.type = DMA_TYPE_MCPY,
1525  	.orrc = 16,
1526  	.chan_fc = false,
1527  	.desc_fod = false,
1528  	.desc_in_sram = true,
1529  	.valid_desc_fetch_ack = false,
1530  };
1531  
1532  static const struct ldma_inst_data toe_dma30 = {
1533  	.name = "toe_dma30",
1534  	.type = DMA_TYPE_MCPY,
1535  	.orrc = 16,
1536  	.chan_fc = false,
1537  	.desc_fod = false,
1538  	.desc_in_sram = true,
1539  	.valid_desc_fetch_ack = true,
1540  };
1541  
1542  static const struct ldma_inst_data toe_dma31 = {
1543  	.name = "toe_dma31",
1544  	.type = DMA_TYPE_MCPY,
1545  	.orrc = 16,
1546  	.chan_fc = false,
1547  	.desc_fod = false,
1548  	.desc_in_sram = true,
1549  	.valid_desc_fetch_ack = true,
1550  };
1551  
1552  static const struct of_device_id intel_ldma_match[] = {
1553  	{ .compatible = "intel,lgm-cdma", .data = &dma0},
1554  	{ .compatible = "intel,lgm-dma2tx", .data = &dma2tx},
1555  	{ .compatible = "intel,lgm-dma1rx", .data = &dma1rx},
1556  	{ .compatible = "intel,lgm-dma1tx", .data = &dma1tx},
1557  	{ .compatible = "intel,lgm-dma0tx", .data = &dma0tx},
1558  	{ .compatible = "intel,lgm-dma3", .data = &dma3},
1559  	{ .compatible = "intel,lgm-toe-dma30", .data = &toe_dma30},
1560  	{ .compatible = "intel,lgm-toe-dma31", .data = &toe_dma31},
1561  	{}
1562  };
1563  
1564  static int intel_ldma_probe(struct platform_device *pdev)
1565  {
1566  	struct device *dev = &pdev->dev;
1567  	struct dma_device *dma_dev;
1568  	unsigned long ch_mask;
1569  	struct ldma_chan *c;
1570  	struct ldma_port *p;
1571  	struct ldma_dev *d;
1572  	u32 id, bitn = 32, j;
1573  	int i, ret;
1574  
1575  	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
1576  	if (!d)
1577  		return -ENOMEM;
1578  
1579  	/* Link controller to platform device */
1580  	d->dev = &pdev->dev;
1581  
1582  	d->inst = device_get_match_data(dev);
1583  	if (!d->inst) {
1584  		dev_err(dev, "No device match found\n");
1585  		return -ENODEV;
1586  	}
1587  
1588  	d->base = devm_platform_ioremap_resource(pdev, 0);
1589  	if (IS_ERR(d->base))
1590  		return PTR_ERR(d->base);
1591  
1592  	/* Power up and reset the DMA engine (some DMA instances may be always on) */
1593  	d->core_clk = devm_clk_get_optional(dev, NULL);
1594  	if (IS_ERR(d->core_clk))
1595  		return PTR_ERR(d->core_clk);
1596  
1597  	d->rst = devm_reset_control_get_optional(dev, NULL);
1598  	if (IS_ERR(d->rst))
1599  		return PTR_ERR(d->rst);
1600  
1601  	clk_prepare_enable(d->core_clk);
1602  	reset_control_deassert(d->rst);
1603  
1604  	ret = devm_add_action_or_reset(dev, ldma_clk_disable, d);
1605  	if (ret) {
1606  		dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret);
1607  		return ret;
1608  	}
1609  
1610  	id = readl(d->base + DMA_ID);
1611  	d->chan_nrs = FIELD_GET(DMA_ID_CHNR, id);
1612  	d->port_nrs = FIELD_GET(DMA_ID_PNR, id);
1613  	d->ver = FIELD_GET(DMA_ID_REV, id);
1614  
1615  	if (id & DMA_ID_AW_36B)
1616  		d->flags |= DMA_ADDR_36BIT;
1617  
1618  	if (IS_ENABLED(CONFIG_64BIT) && (id & DMA_ID_AW_36B))
1619  		bitn = 36;
1620  
1621  	if (id & DMA_ID_DW_128B)
1622  		d->flags |= DMA_DATA_128BIT;
1623  
1624  	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bitn));
1625  	if (ret) {
1626  		dev_err(dev, "No usable DMA configuration\n");
1627  		return ret;
1628  	}
1629  
1630  	if (d->ver == DMA_VER22) {
1631  		ret = ldma_init_v22(d, pdev);
1632  		if (ret)
1633  			return ret;
1634  	}
1635  
1636  	ret = device_property_read_u32(dev, "dma-channel-mask", &d->channels_mask);
1637  	if (ret < 0)
1638  		d->channels_mask = GENMASK(d->chan_nrs - 1, 0);
1639  
1640  	dma_dev = &d->dma_dev;
1641  
1642  	dma_cap_zero(dma_dev->cap_mask);
1643  	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1644  
1645  	/* Channel initializations */
1646  	INIT_LIST_HEAD(&dma_dev->channels);
1647  
1648  	/* Port Initializations */
1649  	d->ports = devm_kcalloc(dev, d->port_nrs, sizeof(*p), GFP_KERNEL);
1650  	if (!d->ports)
1651  		return -ENOMEM;
1652  
1653  	/* Channels Initializations */
1654  	d->chans = devm_kcalloc(d->dev, d->chan_nrs, sizeof(*c), GFP_KERNEL);
1655  	if (!d->chans)
1656  		return -ENOMEM;
1657  
1658  	for (i = 0; i < d->port_nrs; i++) {
1659  		p = &d->ports[i];
1660  		p->portid = i;
1661  		p->ldev = d;
1662  	}
1663  
1664  	dma_dev->dev = &pdev->dev;
1665  
1666  	ch_mask = (unsigned long)d->channels_mask;
1667  	for_each_set_bit(j, &ch_mask, d->chan_nrs) {
1668  		if (d->ver == DMA_VER22)
1669  			ldma_dma_init_v22(j, d);
1670  		else
1671  			ldma_dma_init_v3X(j, d);
1672  	}
1673  
1674  	ret = ldma_parse_dt(d);
1675  	if (ret)
1676  		return ret;
1677  
1678  	dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
1679  	dma_dev->device_free_chan_resources = ldma_free_chan_resources;
1680  	dma_dev->device_terminate_all = ldma_terminate_all;
1681  	dma_dev->device_issue_pending = ldma_issue_pending;
1682  	dma_dev->device_tx_status = ldma_tx_status;
1683  	dma_dev->device_resume = ldma_resume_chan;
1684  	dma_dev->device_pause = ldma_pause_chan;
1685  	dma_dev->device_prep_slave_sg = ldma_prep_slave_sg;
1686  
1687  	if (d->ver == DMA_VER22) {
1688  		dma_dev->device_config = ldma_slave_config;
1689  		dma_dev->device_synchronize = ldma_synchronize;
1690  		dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1691  		dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1692  		dma_dev->directions = BIT(DMA_MEM_TO_DEV) |
1693  				      BIT(DMA_DEV_TO_MEM);
1694  		dma_dev->residue_granularity =
1695  					DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1696  	}
1697  
1698  	platform_set_drvdata(pdev, d);
1699  
1700  	ldma_dev_init(d);
1701  
1702  	ret = dma_async_device_register(dma_dev);
1703  	if (ret) {
1704  		dev_err(dev, "Failed to register slave DMA engine device\n");
1705  		return ret;
1706  	}
1707  
1708  	ret = of_dma_controller_register(pdev->dev.of_node, ldma_xlate, d);
1709  	if (ret) {
1710  		dev_err(dev, "Failed to register of DMA controller\n");
1711  		dma_async_device_unregister(dma_dev);
1712  		return ret;
1713  	}
1714  
1715  	dev_info(dev, "Init done - rev: %x, ports: %d channels: %d\n", d->ver,
1716  		 d->port_nrs, d->chan_nrs);
1717  
1718  	return 0;
1719  }
1720  
1721  static struct platform_driver intel_ldma_driver = {
1722  	.probe = intel_ldma_probe,
1723  	.driver = {
1724  		.name = DRIVER_NAME,
1725  		.of_match_table = intel_ldma_match,
1726  	},
1727  };
1728  
1729  /*
1730   * Register this driver at device_initcall level to make sure initialization
1731   * happens before its DMA clients, some of which are platform specific, and
1732   * to provide registered DMA channels and DMA capabilities to clients before
1733   * their initialization.
1734   */
1735  builtin_platform_driver(intel_ldma_driver);
1736