1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4  * stmmac XGMAC support.
5  */
6 
7 #include <linux/iopoll.h>
8 #include "stmmac.h"
9 #include "dwxgmac2.h"
10 
11 static int dwxgmac2_dma_reset(void __iomem *ioaddr)
12 {
13 	u32 value = readl(ioaddr + XGMAC_DMA_MODE);
14 
15 	/* DMA SW reset */
16 	writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
17 
18 	return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
19 				  !(value & XGMAC_SWR), 0, 100000);
20 }
21 
22 static void dwxgmac2_dma_init(void __iomem *ioaddr,
23 			      struct stmmac_dma_cfg *dma_cfg, int atds)
24 {
25 	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
26 
27 	if (dma_cfg->aal)
28 		value |= XGMAC_AAL;
29 
30 	if (dma_cfg->eame)
31 		value |= XGMAC_EAME;
32 
33 	writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
34 }
35 
36 static void dwxgmac2_dma_init_chan(struct stmmac_priv *priv,
37 				   void __iomem *ioaddr,
38 				   struct stmmac_dma_cfg *dma_cfg, u32 chan)
39 {
40 	u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
41 
42 	if (dma_cfg->pblx8)
43 		value |= XGMAC_PBLx8;
44 
45 	writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
46 	writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
47 }
48 
49 static void dwxgmac2_dma_init_rx_chan(struct stmmac_priv *priv,
50 				      void __iomem *ioaddr,
51 				      struct stmmac_dma_cfg *dma_cfg,
52 				      dma_addr_t phy, u32 chan)
53 {
54 	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
55 	u32 value;
56 
57 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
58 	value &= ~XGMAC_RxPBL;
59 	value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
60 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
61 
62 	writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
63 	writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
64 }
65 
66 static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
67 				      void __iomem *ioaddr,
68 				      struct stmmac_dma_cfg *dma_cfg,
69 				      dma_addr_t phy, u32 chan)
70 {
71 	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
72 	u32 value;
73 
74 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
75 	value &= ~XGMAC_TxPBL;
76 	value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
77 	value |= XGMAC_OSP;
78 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
79 
80 	writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
81 	writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
82 }
83 
84 static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
85 {
86 	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
87 	int i;
88 
89 	if (axi->axi_lpi_en)
90 		value |= XGMAC_EN_LPI;
91 	if (axi->axi_xit_frm)
92 		value |= XGMAC_LPI_XIT_PKT;
93 
94 	value &= ~XGMAC_WR_OSR_LMT;
95 	value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
96 		XGMAC_WR_OSR_LMT;
97 
98 	value &= ~XGMAC_RD_OSR_LMT;
99 	value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
100 		XGMAC_RD_OSR_LMT;
101 
102 	if (!axi->axi_fb)
103 		value |= XGMAC_UNDEF;
104 
105 	value &= ~XGMAC_BLEN;
106 	for (i = 0; i < AXI_BLEN; i++) {
107 		switch (axi->axi_blen[i]) {
108 		case 256:
109 			value |= XGMAC_BLEN256;
110 			break;
111 		case 128:
112 			value |= XGMAC_BLEN128;
113 			break;
114 		case 64:
115 			value |= XGMAC_BLEN64;
116 			break;
117 		case 32:
118 			value |= XGMAC_BLEN32;
119 			break;
120 		case 16:
121 			value |= XGMAC_BLEN16;
122 			break;
123 		case 8:
124 			value |= XGMAC_BLEN8;
125 			break;
126 		case 4:
127 			value |= XGMAC_BLEN4;
128 			break;
129 		}
130 	}
131 
132 	writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
133 	writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
134 	writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
135 }
136 
137 static void dwxgmac2_dma_dump_regs(struct stmmac_priv *priv,
138 				   void __iomem *ioaddr, u32 *reg_space)
139 {
140 	int i;
141 
142 	for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++)
143 		reg_space[i] = readl(ioaddr + i * 4);
144 }
145 
static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
				 int mode, u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
	/* RQS encodes the RX queue size in 256-byte blocks, minus one */
	unsigned int rqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		/* Receive Store-and-Forward */
		value |= XGMAC_RSF;
	} else {
		/* Threshold mode: forward once 'mode' bytes are queued */
		value &= ~XGMAC_RSF;
		value &= ~XGMAC_RTC;

		if (mode <= 64)
			value |= 0x0 << XGMAC_RTC_SHIFT;
		else if (mode <= 96)
			value |= 0x2 << XGMAC_RTC_SHIFT;
		else
			value |= 0x3 << XGMAC_RTC_SHIFT;
	}

	value &= ~XGMAC_RQS;
	value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;

	/* Hardware flow control only makes sense with a large enough
	 * FIFO and never for AVB queues.
	 */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
		unsigned int rfd, rfa;

		value |= XGMAC_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		default:
			rfd = 0x07; /* Full-4.5K */
			rfa = 0x04; /* Full-3K */
			break;
		}

		flow &= ~XGMAC_RFD;
		flow |= rfd << XGMAC_RFD_SHIFT;

		flow &= ~XGMAC_RFA;
		flow |= rfa << XGMAC_RFA_SHIFT;

		writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
	}

	writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));

	/* Enable MTL RX overflow */
	value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
	writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
211 
212 static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
213 				 int mode, u32 channel, int fifosz, u8 qmode)
214 {
215 	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
216 	unsigned int tqs = fifosz / 256 - 1;
217 
218 	if (mode == SF_DMA_MODE) {
219 		value |= XGMAC_TSF;
220 	} else {
221 		value &= ~XGMAC_TSF;
222 		value &= ~XGMAC_TTC;
223 
224 		if (mode <= 64)
225 			value |= 0x0 << XGMAC_TTC_SHIFT;
226 		else if (mode <= 96)
227 			value |= 0x2 << XGMAC_TTC_SHIFT;
228 		else if (mode <= 128)
229 			value |= 0x3 << XGMAC_TTC_SHIFT;
230 		else if (mode <= 192)
231 			value |= 0x4 << XGMAC_TTC_SHIFT;
232 		else if (mode <= 256)
233 			value |= 0x5 << XGMAC_TTC_SHIFT;
234 		else if (mode <= 384)
235 			value |= 0x6 << XGMAC_TTC_SHIFT;
236 		else
237 			value |= 0x7 << XGMAC_TTC_SHIFT;
238 	}
239 
240 	/* Use static TC to Queue mapping */
241 	value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;
242 
243 	value &= ~XGMAC_TXQEN;
244 	if (qmode != MTL_QUEUE_AVB)
245 		value |= 0x2 << XGMAC_TXQEN_SHIFT;
246 	else
247 		value |= 0x1 << XGMAC_TXQEN_SHIFT;
248 
249 	value &= ~XGMAC_TQS;
250 	value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;
251 
252 	writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
253 }
254 
255 static void dwxgmac2_enable_dma_irq(struct stmmac_priv *priv,
256 				    void __iomem *ioaddr, u32 chan,
257 				    bool rx, bool tx)
258 {
259 	u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
260 
261 	if (rx)
262 		value |= XGMAC_DMA_INT_DEFAULT_RX;
263 	if (tx)
264 		value |= XGMAC_DMA_INT_DEFAULT_TX;
265 
266 	writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
267 }
268 
269 static void dwxgmac2_disable_dma_irq(struct stmmac_priv *priv,
270 				     void __iomem *ioaddr, u32 chan,
271 				     bool rx, bool tx)
272 {
273 	u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
274 
275 	if (rx)
276 		value &= ~XGMAC_DMA_INT_DEFAULT_RX;
277 	if (tx)
278 		value &= ~XGMAC_DMA_INT_DEFAULT_TX;
279 
280 	writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
281 }
282 
283 static void dwxgmac2_dma_start_tx(struct stmmac_priv *priv,
284 				  void __iomem *ioaddr, u32 chan)
285 {
286 	u32 value;
287 
288 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
289 	value |= XGMAC_TXST;
290 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
291 
292 	value = readl(ioaddr + XGMAC_TX_CONFIG);
293 	value |= XGMAC_CONFIG_TE;
294 	writel(value, ioaddr + XGMAC_TX_CONFIG);
295 }
296 
297 static void dwxgmac2_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
298 				 u32 chan)
299 {
300 	u32 value;
301 
302 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
303 	value &= ~XGMAC_TXST;
304 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
305 
306 	value = readl(ioaddr + XGMAC_TX_CONFIG);
307 	value &= ~XGMAC_CONFIG_TE;
308 	writel(value, ioaddr + XGMAC_TX_CONFIG);
309 }
310 
311 static void dwxgmac2_dma_start_rx(struct stmmac_priv *priv,
312 				  void __iomem *ioaddr, u32 chan)
313 {
314 	u32 value;
315 
316 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
317 	value |= XGMAC_RXST;
318 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
319 
320 	value = readl(ioaddr + XGMAC_RX_CONFIG);
321 	value |= XGMAC_CONFIG_RE;
322 	writel(value, ioaddr + XGMAC_RX_CONFIG);
323 }
324 
325 static void dwxgmac2_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
326 				 u32 chan)
327 {
328 	u32 value;
329 
330 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
331 	value &= ~XGMAC_RXST;
332 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
333 }
334 
static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
				  void __iomem *ioaddr,
				  struct stmmac_extra_stats *x, u32 chan,
				  u32 dir)
{
	/* Per-queue counters are updated under u64_stats sync below */
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
	u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
	u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
	int ret = 0;

	/* When servicing a single direction, mask off the other
	 * direction's status bits so they are neither acted on nor
	 * cleared here.
	 */
	if (dir == DMA_DIR_RX)
		intr_status &= XGMAC_DMA_STATUS_MSK_RX;
	else if (dir == DMA_DIR_TX)
		intr_status &= XGMAC_DMA_STATUS_MSK_TX;

	/* ABNORMAL interrupts */
	if (unlikely(intr_status & XGMAC_AIS)) {
		/* RX buffer unavailable: schedule RX refill/processing */
		if (unlikely(intr_status & XGMAC_RBU)) {
			x->rx_buf_unav_irq++;
			ret |= handle_rx;
		}
		/* TX process stopped: treat as a hard TX error */
		if (unlikely(intr_status & XGMAC_TPS)) {
			x->tx_process_stopped_irq++;
			ret |= tx_hard_error;
		}
		/* Fatal bus error: also reported as a hard TX error */
		if (unlikely(intr_status & XGMAC_FBE)) {
			x->fatal_bus_error_irq++;
			ret |= tx_hard_error;
		}
	}

	/* TX/RX NORMAL interrupts */
	if (likely(intr_status & XGMAC_NIS)) {
		if (likely(intr_status & XGMAC_RI)) {
			u64_stats_update_begin(&rx_q->rxq_stats.syncp);
			rx_q->rxq_stats.rx_normal_irq_n++;
			u64_stats_update_end(&rx_q->rxq_stats.syncp);
			ret |= handle_rx;
		}
		/* TX complete or TX buffer unavailable both kick the
		 * TX completion path.
		 */
		if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
			u64_stats_update_begin(&tx_q->txq_stats.syncp);
			tx_q->txq_stats.tx_normal_irq_n++;
			u64_stats_update_end(&tx_q->txq_stats.syncp);
			ret |= handle_tx;
		}
	}

	/* Clear interrupts: only the bits that are both raised and
	 * enabled, so masked sources keep their pending status.
	 */
	writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));

	return ret;
}
388 
/* Decode the XGMAC HW_FEATURE0..3 registers into the generic
 * dma_features capability structure. Always returns 0.
 */
static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
				   struct dma_features *dma_cap)
{
	u32 hw_cap;

	/*  MAC HW feature 0 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
	dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
	dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
	dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
	dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
	dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
	dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
	/* Logical NOT: av is cleared whenever the RAVSEL bit is set */
	dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10);
	dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
	dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
	dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
	dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
	dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
	dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;

	/* MAC HW feature 1 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
	dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
	/* If L3L4FNUM < 8, then the number of L3L4 filters supported by
	 * XGMAC is equal to L3L4FNUM. From L3L4FNUM >= 8 the number of
	 * L3L4 filters goes on like 8, 16, 32, ... Current maximum of
	 * L3L4FNUM = 10.
	 */
	if (dma_cap->l3l4fnum >= 8 && dma_cap->l3l4fnum <= 10)
		dma_cap->l3l4fnum = 8 << (dma_cap->l3l4fnum - 8);
	else if (dma_cap->l3l4fnum > 10)
		dma_cap->l3l4fnum = 32;

	dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
	dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
	dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
	dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;

	/* Translate the encoded DMA address width to a bit count;
	 * unknown encodings fall back to 32 bits.
	 */
	dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
	switch (dma_cap->addr64) {
	case 0:
		dma_cap->addr64 = 32;
		break;
	case 1:
		dma_cap->addr64 = 40;
		break;
	case 2:
		dma_cap->addr64 = 48;
		break;
	default:
		dma_cap->addr64 = 32;
		break;
	}

	/* FIFO sizes are encoded as 128 << field, in bytes */
	dma_cap->tx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
	dma_cap->rx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);

	/* MAC HW feature 2 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
	dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
	/* Channel/queue counts are stored zero-based in hardware */
	dma_cap->number_tx_channel =
		((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
	dma_cap->number_rx_channel =
		((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
	dma_cap->number_rx_queues =
		((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;

	/* MAC HW feature 3 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
	dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27;
	dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26;
	dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23;
	dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20;
	dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19;
	dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
	dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
	dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
	dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
	dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;

	return 0;
}
476 
/* Program the RX interrupt watchdog timer (coalescing) for a queue;
 * riwt is clamped to the RWT field width.
 */
static void dwxgmac2_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
				 u32 riwt, u32 queue)
{
	writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue));
}
482 
/* Program the RX descriptor ring length for a DMA channel */
static void dwxgmac2_set_rx_ring_len(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 len, u32 chan)
{
	writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
}
488 
/* Program the TX descriptor ring length for a DMA channel */
static void dwxgmac2_set_tx_ring_len(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 len, u32 chan)
{
	writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
}
494 
/* Update the RX descriptor tail pointer, handing new descriptors to
 * the DMA engine.
 */
static void dwxgmac2_set_rx_tail_ptr(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 ptr, u32 chan)
{
	writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
}
500 
/* Update the TX descriptor tail pointer, triggering transmission of
 * newly queued descriptors.
 */
static void dwxgmac2_set_tx_tail_ptr(struct stmmac_priv *priv,
				     void __iomem *ioaddr, u32 ptr, u32 chan)
{
	writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
}
506 
507 static void dwxgmac2_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
508 				bool en, u32 chan)
509 {
510 	u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
511 
512 	if (en)
513 		value |= XGMAC_TSE;
514 	else
515 		value &= ~XGMAC_TSE;
516 
517 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
518 }
519 
520 static void dwxgmac2_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
521 			   u32 channel, u8 qmode)
522 {
523 	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
524 	u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
525 
526 	value &= ~XGMAC_TXQEN;
527 	if (qmode != MTL_QUEUE_AVB) {
528 		value |= 0x2 << XGMAC_TXQEN_SHIFT;
529 		writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
530 	} else {
531 		value |= 0x1 << XGMAC_TXQEN_SHIFT;
532 		writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
533 	}
534 
535 	writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
536 }
537 
538 static void dwxgmac2_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
539 				int bfsize, u32 chan)
540 {
541 	u32 value;
542 
543 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
544 	value &= ~XGMAC_RBSZ;
545 	value |= bfsize << XGMAC_RBSZ_SHIFT;
546 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
547 }
548 
549 static void dwxgmac2_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
550 				bool en, u32 chan)
551 {
552 	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
553 
554 	value &= ~XGMAC_CONFIG_HDSMS;
555 	value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
556 	writel(value, ioaddr + XGMAC_RX_CONFIG);
557 
558 	value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
559 	if (en)
560 		value |= XGMAC_SPH;
561 	else
562 		value &= ~XGMAC_SPH;
563 	writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
564 }
565 
566 static int dwxgmac2_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
567 			       bool en, u32 chan)
568 {
569 	u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
570 
571 	if (en)
572 		value |= XGMAC_EDSE;
573 	else
574 		value &= ~XGMAC_EDSE;
575 
576 	writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
577 
578 	value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE;
579 	if (en && !value)
580 		return -EIO;
581 
582 	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0);
583 	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1);
584 	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2);
585 	writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3);
586 	return 0;
587 }
588 
/* DMA operations published for the DWC XGMAC 2.10 core; consumed by
 * the generic stmmac HW-interface layer.
 */
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
	.reset = dwxgmac2_dma_reset,
	.init = dwxgmac2_dma_init,
	.init_chan = dwxgmac2_dma_init_chan,
	.init_rx_chan = dwxgmac2_dma_init_rx_chan,
	.init_tx_chan = dwxgmac2_dma_init_tx_chan,
	.axi = dwxgmac2_dma_axi,
	.dump_regs = dwxgmac2_dma_dump_regs,
	.dma_rx_mode = dwxgmac2_dma_rx_mode,
	.dma_tx_mode = dwxgmac2_dma_tx_mode,
	.enable_dma_irq = dwxgmac2_enable_dma_irq,
	.disable_dma_irq = dwxgmac2_disable_dma_irq,
	.start_tx = dwxgmac2_dma_start_tx,
	.stop_tx = dwxgmac2_dma_stop_tx,
	.start_rx = dwxgmac2_dma_start_rx,
	.stop_rx = dwxgmac2_dma_stop_rx,
	.dma_interrupt = dwxgmac2_dma_interrupt,
	.get_hw_feature = dwxgmac2_get_hw_feature,
	.rx_watchdog = dwxgmac2_rx_watchdog,
	.set_rx_ring_len = dwxgmac2_set_rx_ring_len,
	.set_tx_ring_len = dwxgmac2_set_tx_ring_len,
	.set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
	.enable_tso = dwxgmac2_enable_tso,
	.qmode = dwxgmac2_qmode,
	.set_bfsize = dwxgmac2_set_bfsize,
	.enable_sph = dwxgmac2_enable_sph,
	.enable_tbs = dwxgmac2_enable_tbs,
};
618