// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/bitfield.h>
#include <linux/irqreturn.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-v0-debugfs.h"

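/*
 * Linked-list/control register bits. Per the DesignWare eDMA naming these
 * are: CB cycle bit, TCB toggle cycle bit, LLP load link pointer, LIE/RIE
 * local/remote interrupt enable (used in the LL element control word), and
 * CCS/LLE consumer cycle state and linked-list enable (set in ch_control1).
 */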
enum dw_edma_control {
	DW_EDMA_V0_CB					= BIT(0),
	DW_EDMA_V0_TCB					= BIT(1),
	DW_EDMA_V0_LLP					= BIT(2),
	DW_EDMA_V0_LIE					= BIT(3),
	DW_EDMA_V0_RIE					= BIT(4),
	DW_EDMA_V0_CCS					= BIT(8),
	DW_EDMA_V0_LLE					= BIT(9),
};

static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
	return dw->chip->reg_base;
}

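/*
 * Register accessor macros: SET_32/GET_32 and SET_64/GET_64 access a field
 * of the common register file by name, the _RW_ variants pick the wr_/rd_
 * copy of the field based on the transfer direction, and the _BOTH_ variants
 * write the same value to both the write and read copies.
 */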
#define SET_32(dw, name, value)				\
	writel(value, &(__dw_regs(dw)->name))

#define GET_32(dw, name)				\
	readl(&(__dw_regs(dw)->name))

#define SET_RW_32(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_32(dw, wr_##name, value);	\
		else					\
			SET_32(dw, rd_##name, value);	\
	} while (0)

#define GET_RW_32(dw, dir, name)			\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET_32(dw, wr_##name)			\
	  : GET_32(dw, rd_##name))

#define SET_BOTH_32(dw, name, value)			\
	do {						\
		SET_32(dw, wr_##name, value);		\
		SET_32(dw, rd_##name, value);		\
	} while (0)

#define SET_64(dw, name, value)				\
	writeq(value, &(__dw_regs(dw)->name))

#define GET_64(dw, name)				\
	readq(&(__dw_regs(dw)->name))

#define SET_RW_64(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_64(dw, wr_##name, value);	\
		else					\
			SET_64(dw, rd_##name, value);	\
	} while (0)

#define GET_RW_64(dw, dir, name)			\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET_64(dw, wr_##name)			\
	  : GET_64(dw, rd_##name))

#define SET_BOTH_64(dw, name, value)			\
	do {						\
		SET_64(dw, wr_##name, value);		\
		SET_64(dw, rd_##name, value);		\
	} while (0)

#define SET_COMPAT(dw, name, value)			\
	writel(value, &(__dw_regs(dw)->type.unroll.name))

#define SET_RW_COMPAT(dw, dir, name, value)		\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_COMPAT(dw, wr_##name, value); \
		else					\
			SET_COMPAT(dw, rd_##name, value); \
	} while (0)

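/*
 * Return the channel register block for the given direction/channel. In the
 * unrolled map every channel has its own block; in the legacy map there is a
 * single shared block multiplexed through the viewport (see below).
 */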
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)
		return &(__dw_regs(dw)->type.legacy.ch);

	if (dir == EDMA_DIR_WRITE)
		return &__dw_regs(dw)->type.unroll.ch[ch].wr;

	return &__dw_regs(dw)->type.unroll.ch[ch].rd;
}

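/*
 * Channel register accessors for the legacy map: select the target channel
 * (and direction, via BIT(31)) in the viewport_sel register, then access the
 * shared block. The select+access pair must be serialized under dw->lock so
 * that concurrent channels cannot retarget the viewport mid-access.
 */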
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u32 value, void __iomem *addr)
{
	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writel(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		writel(value, addr);
	}
}

static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u32 value;

	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readl(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		value = readl(addr);
	}

	return value;
}

#define SET_CH_32(dw, dir, ch, name, value) \
	writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH_32(dw, dir, ch, name) \
	readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

/* eDMA management callbacks */
static void dw_edma_v0_core_off(struct dw_edma *dw)
{
	SET_BOTH_32(dw, int_mask,
		    EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH_32(dw, int_clear,
		    EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH_32(dw, engine_en, 0);
}

static u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
	u32 num_ch;

	if (dir == EDMA_DIR_WRITE)
		num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK,
				   GET_32(dw, ctrl));
	else
		num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK,
				   GET_32(dw, ctrl));

	if (num_ch > EDMA_V0_MAX_NR_CH)
		num_ch = EDMA_V0_MAX_NR_CH;

	return (u16)num_ch;
}

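/*
 * Map the hardware channel status field to the dmaengine status codes. Per
 * the v0 register layout, 1 appears to mean "running" and 3 "stopped/done";
 * anything else is treated as an error.
 */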
static enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;
	u32 tmp;

	tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
			GET_CH_32(dw, chan->dir, chan->id, ch_control1));

	if (tmp == 1)
		return DMA_IN_PROGRESS;
	else if (tmp == 3)
		return DMA_COMPLETE;
	else
		return DMA_ERROR;
}

static void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_RW_32(dw, chan->dir, int_clear,
		  FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}

static void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_RW_32(dw, chan->dir, int_clear,
		  FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}

static u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_DONE_INT_MASK,
			 GET_RW_32(dw, dir, int_status));
}

static u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_ABORT_INT_MASK,
			 GET_RW_32(dw, dir, int_status));
}

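/*
 * Common handler for the write and read interrupt lines. Write channels
 * occupy dw->chan[0..wr_ch_cnt) and read channels follow at offset
 * wr_ch_cnt, hence the per-direction base offset below. Done and abort
 * events are acknowledged per channel before the respective callback runs.
 */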
static irqreturn_t
dw_edma_v0_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
			   dw_edma_handler_t done, dw_edma_handler_t abort)
{
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	irqreturn_t ret = IRQ_NONE;
	struct dw_edma_chan *chan;
	unsigned long off;
	u32 mask;

	if (dir == EDMA_DIR_WRITE) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, dir);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		chan = &dw->chan[pos + off];

		dw_edma_v0_core_clear_done_int(chan);
		done(chan);

		ret = IRQ_HANDLED;
	}

	val = dw_edma_v0_core_status_abort_int(dw, dir);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		chan = &dw->chan[pos + off];

		dw_edma_v0_core_clear_abort_int(chan);
		abort(chan);

		ret = IRQ_HANDLED;
	}

	return ret;
}

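/*
 * Write one linked-list data element. For a local setup the list lives in
 * CPU-addressable memory and plain stores suffice; for a remote setup it
 * sits behind PCIe, so the MMIO accessors must be used instead.
 */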
static void dw_edma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
				     u32 control, u32 size, u64 sar, u64 dar)
{
	ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);

	if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
		struct dw_edma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;

		lli->control = control;
		lli->transfer_size = size;
		lli->sar.reg = sar;
		lli->dar.reg = dar;
	} else {
		struct dw_edma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;

		writel(control, &lli->control);
		writel(size, &lli->transfer_size);
		writeq(sar, &lli->sar.reg);
		writeq(dar, &lli->dar.reg);
	}
}

static void dw_edma_v0_write_ll_link(struct dw_edma_chunk *chunk,
				     int i, u32 control, u64 pointer)
{
	ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);

	if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
		struct dw_edma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;

		llp->control = control;
		llp->llp.reg = pointer;
	} else {
		struct dw_edma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;

		writel(control, &llp->control);
		writeq(pointer, &llp->llp.reg);
	}
}

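/*
 * Lay out the chunk's linked list: one data element per burst, requesting
 * the done interrupt (LIE, plus RIE for remote setups) only on the last
 * element, then a terminating link element that points back to the start
 * of the list, carrying the toggle cycle bit and the inverse of the
 * chunk's cycle bit.
 */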
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child;
	struct dw_edma_chan *chan = chunk->chan;
	u32 control = 0, i = 0;
	int j;

	if (chunk->cb)
		control = DW_EDMA_V0_CB;

	j = chunk->bursts_alloc;
	list_for_each_entry(child, &chunk->burst->list, list) {
		j--;
		if (!j) {
			control |= DW_EDMA_V0_LIE;
			if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
				control |= DW_EDMA_V0_RIE;
		}

		dw_edma_v0_write_ll_data(chunk, i++, control, child->sz,
					 child->sar, child->dar);
	}

	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
	if (!chunk->cb)
		control |= DW_EDMA_V0_CB;

	dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}

static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
{
	/*
	 * In case of a remote eDMA engine setup, the DW PCIe RP/EP internal
	 * configuration registers and the application memory are normally
	 * accessed over different buses. Ensure the LL data reaches memory
	 * before the doorbell register is toggled by issuing a dummy read
	 * from the remote LL memory, in the hope that the MRd TLP will only
	 * return after the last MWr TLP has completed.
	 */
	if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
		readl(chunk->ll_region.vaddr.io);
}

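/*
 * Start (or continue) a transfer. For the first chunk of a descriptor the
 * engine is enabled, the channel's done/abort interrupts are unmasked,
 * linked-list error reporting is turned on and the LL pointer is programmed;
 * every chunk is then kicked off by ringing the channel's doorbell.
 */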
static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma *dw = chan->dw;
	u32 tmp;

	dw_edma_v0_core_write_chunk(chunk);

	if (first) {
		/* Enable engine */
		SET_RW_32(dw, chan->dir, engine_en, BIT(0));
		if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
			switch (chan->id) {
			case 0:
				SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
					      BIT(0));
				break;
			case 1:
				SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en,
					      BIT(0));
				break;
			case 2:
				SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en,
					      BIT(0));
				break;
			case 3:
				SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en,
					      BIT(0));
				break;
			case 4:
				SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en,
					      BIT(0));
				break;
			case 5:
				SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en,
					      BIT(0));
				break;
			case 6:
				SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en,
					      BIT(0));
				break;
			case 7:
				SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en,
					      BIT(0));
				break;
			}
		}
		/* Interrupt unmask - done, abort */
		tmp = GET_RW_32(dw, chan->dir, int_mask);
		tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
		tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
		SET_RW_32(dw, chan->dir, int_mask, tmp);
		/* Linked list error */
		tmp = GET_RW_32(dw, chan->dir, linked_list_err_en);
		tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
		SET_RW_32(dw, chan->dir, linked_list_err_en, tmp);
		/* Channel control */
		SET_CH_32(dw, chan->dir, chan->id, ch_control1,
			  (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
		/* Linked list */
		/* llp is not aligned on 64bit -> keep 32bit accesses */
		SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
			  lower_32_bits(chunk->ll_region.paddr));
		SET_CH_32(dw, chan->dir, chan->id, llp.msb,
			  upper_32_bits(chunk->ll_region.paddr));
	}

	dw_edma_v0_sync_ll_data(chunk);

	/* Doorbell */
	SET_RW_32(dw, chan->dir, doorbell,
		  FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}

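/*
 * Program the per-channel iMWr (MSI) done/abort addresses and data. Each
 * 32-bit imwr_data register is shared by a channel pair, with the even
 * channel's MSI data in one half and the odd channel's in the other, hence
 * the read-modify-write of only this channel's half below.
 */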
static void dw_edma_v0_core_ch_config(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;
	u32 tmp = 0;

	/* MSI done addr - low, high */
	SET_RW_32(dw, chan->dir, done_imwr.lsb, chan->msi.address_lo);
	SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi);
	/* MSI abort addr - low, high */
	SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo);
	SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi);
	/* MSI data - low, high */
	switch (chan->id) {
	case 0:
	case 1:
		tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data);
		break;

	case 2:
	case 3:
		tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data);
		break;

	case 4:
	case 5:
		tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data);
		break;

	case 6:
	case 7:
		tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data);
		break;
	}

	if (chan->id & BIT(0)) {
		/* Channel odd {1, 3, 5, 7} */
		tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
				  chan->msi.data);
	} else {
		/* Channel even {0, 2, 4, 6} */
		tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
				  chan->msi.data);
	}

	switch (chan->id) {
	case 0:
	case 1:
		SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp);
		break;

	case 2:
	case 3:
		SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp);
		break;

	case 4:
	case 5:
		SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp);
		break;

	case 6:
	case 7:
		SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp);
		break;
	}
}

/* eDMA debugfs callbacks */
static void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
	dw_edma_v0_debugfs_on(dw);
}

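/* v0 ops table, plugged into the generic eDMA core via dw->core below */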
static const struct dw_edma_core_ops dw_edma_v0_core = {
	.off = dw_edma_v0_core_off,
	.ch_count = dw_edma_v0_core_ch_count,
	.ch_status = dw_edma_v0_core_ch_status,
	.handle_int = dw_edma_v0_core_handle_int,
	.start = dw_edma_v0_core_start,
	.ch_config = dw_edma_v0_core_ch_config,
	.debugfs_on = dw_edma_v0_core_debugfs_on,
};

void dw_edma_v0_core_register(struct dw_edma *dw)
{
	dw->core = &dw_edma_v0_core;
}