// SPDX-License-Identifier: (GPL-2.0 OR MIT)
// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
// stmmac Support for 5.xx Ethernet QoS cores

#include <linux/bitops.h>
#include <linux/iopoll.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"
#include "stmmac_ptp.h"

struct dwmac5_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};

#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)

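/* Decode one safety interrupt status word: log a message for every set bit
 * using the matching entry in @desc and bump the corresponding counter in
 * @stats at @field_offset.
 */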
static void dwmac5_log_error(struct net_device *ndev, u32 value, bool corr,
		const char *module_name, const struct dwmac5_error_desc *desc,
		unsigned long field_offset, struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
				"correctable" : "uncorrectable", module_name,
				desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}

static const struct dwmac5_error_desc dwmac5_mac_errors[32] = {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "RDPES", "Read Descriptor Parity Check Error" },
	{ true, "MPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ true, "T125ES", "TX125 FSM Timeout Error" },
	{ true, "R125ES", "RX125 FSM Timeout Error" },
	{ true, "RVCTES", "REV MDC FSM Timeout Error" },
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwmac5_handle_mac_err(struct net_device *ndev,
		void __iomem *ioaddr, bool correctable,
		struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + MAC_DPP_FSM_INT_STATUS);
	writel(value, ioaddr + MAC_DPP_FSM_INT_STATUS);

	dwmac5_log_error(ndev, value, correctable, "MAC", dwmac5_mac_errors,
			STAT_OFF(mac_errors), stats);
}

static const struct dwmac5_error_desc dwmac5_mtl_errors[32] = {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwmac5_handle_mtl_err(struct net_device *ndev,
		void __iomem *ioaddr, bool correctable,
		struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + MTL_ECC_INT_STATUS);
	writel(value, ioaddr + MTL_ECC_INT_STATUS);

	dwmac5_log_error(ndev, value, correctable, "MTL", dwmac5_mtl_errors,
			STAT_OFF(mtl_errors), stats);
}

static const struct dwmac5_error_desc dwmac5_dma_errors[32] = {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 4 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 5 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 6 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwmac5_handle_dma_err(struct net_device *ndev,
		void __iomem *ioaddr, bool correctable,
		struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + DMA_ECC_INT_STATUS);
	writel(value, ioaddr + DMA_ECC_INT_STATUS);

	dwmac5_log_error(ndev, value, correctable, "DMA", dwmac5_dma_errors,
			STAT_OFF(dma_errors), stats);
}

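/* Enable the Automotive Safety Package according to the level reported in
 * @asp: ECC protection and its interrupts are always enabled, FSM parity and
 * timeout checking plus data path parity only when asp > 1, and parity on
 * the external application interface (EPSI) only when asp > 2.
 */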
int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features */
	value = readl(ioaddr + MTL_ECC_CONTROL);
	value |= MEEAO; /* MTL ECC Error Addr Status Override */
	value |= TSOEE; /* TSO ECC */
	value |= MRXPEE; /* MTL RX Parser ECC */
	value |= MESTEE; /* MTL EST ECC */
	value |= MRXEE; /* MTL RX FIFO ECC */
	value |= MTXEE; /* MTL TX FIFO ECC */
	writel(value, ioaddr + MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + MTL_ECC_INT_ENABLE);
	value |= RPCEIE; /* RX Parser Memory Correctable Error */
	value |= ECEIE; /* EST Memory Correctable Error */
	value |= RXCEIE; /* RX Memory Correctable Error */
	value |= TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + DMA_ECC_INT_ENABLE);
	value |= TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + DMA_ECC_INT_ENABLE);
	/* Only the ECC Protection for External Memory feature is selected */
	if (asp <= 0x1)
		return 0;

	/* 5. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + MAC_FSM_CONTROL);
	value |= PRTYEN; /* FSM Parity Feature */
	value |= TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + MAC_FSM_CONTROL);

	/* 4. Enable Data Parity Protection */
	value = readl(ioaddr + MTL_DPP_CONTROL);
	value |= EDPP;
	writel(value, ioaddr + MTL_DPP_CONTROL);

	/*
	 * All the Automotive Safety features are selected without the "Parity
	 * Port Enable for external interface" feature.
	 */
	if (asp <= 0x2)
		return 0;

	value |= EPSI;
	writel(value, ioaddr + MTL_DPP_CONTROL);
	return 0;
}

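/* Read the MTL and DMA safety interrupt status registers, dispatch to the
 * MAC/MTL/DMA error handlers and return non-zero if at least one
 * uncorrectable error was signalled.
 */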
int dwmac5_safety_feat_irq_status(struct net_device *ndev,
		void __iomem *ioaddr, unsigned int asp,
		struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + DMA_SAFETY_INT_STATUS);

	err = (mtl & MCSIS) || (dma & MCSIS);
	corr = false;
	if (err) {
		dwmac5_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = (mtl & (MEUIS | MECIS)) || (dma & (MSUIS | MSCIS));
	corr = (mtl & MECIS) || (dma & MSCIS);
	if (err) {
		dwmac5_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = dma & (DEUIS | DECIS);
	corr = dma & DECIS;
	if (err) {
		dwmac5_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}

static const struct dwmac5_error {
	const struct dwmac5_error_desc *desc;
} dwmac5_all_errors[] = {
	{ dwmac5_mac_errors },
	{ dwmac5_mtl_errors },
	{ dwmac5_dma_errors },
};

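/* Dump one safety counter: @index addresses the flattened stats array, 32
 * entries per module (MAC, MTL, DMA), returning its count and short name.
 */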
int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
			int index, unsigned long *count, const char **desc)
{
	int module = index / 32, offset = index % 32;
	unsigned long *ptr = (unsigned long *)stats;

	if (module >= ARRAY_SIZE(dwmac5_all_errors))
		return -EINVAL;
	if (!dwmac5_all_errors[module].desc[offset].valid)
		return -EINVAL;
	if (count)
		*count = *(ptr + index);
	if (desc)
		*desc = dwmac5_all_errors[module].desc[offset].desc;
	return 0;
}

static int dwmac5_rxp_disable(void __iomem *ioaddr)
{
	u32 val;

	val = readl(ioaddr + MTL_OPERATION_MODE);
	val &= ~MTL_FRPE;
	writel(val, ioaddr + MTL_OPERATION_MODE);

	return readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val,
			val & RXPI, 1, 10000);
}

static void dwmac5_rxp_enable(void __iomem *ioaddr)
{
	u32 val;

	val = readl(ioaddr + MTL_OPERATION_MODE);
	val |= MTL_FRPE;
	writel(val, ioaddr + MTL_OPERATION_MODE);
}

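/* Program one RX Parser entry at table position @pos, one 32-bit word at a
 * time through the indirect access data/control registers.
 */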
static int dwmac5_rxp_update_single_entry(void __iomem *ioaddr,
					  struct stmmac_tc_entry *entry,
					  int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
				val, !(val & STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & ADDR;
		writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);

		/* Write OP */
		val |= WRRDN;
		writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);

		/* Start Write */
		val |= STARTBUSY;
		writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
				val, !(val & STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}

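/* Pick the next entry to program: the not-yet-written entry with the lowest
 * priority value that is still >= @curr_prio, skipping fragments and the
 * final catch-all entry.
 */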
static struct stmmac_tc_entry *
dwmac5_rxp_get_next_entry(struct stmmac_tc_entry *entries, unsigned int count,
			  u32 curr_prio)
{
	struct stmmac_tc_entry *entry;
	u32 min_prio = ~0x0;
	int i, min_prio_idx;
	bool found = false;

	for (i = count - 1; i >= 0; i--) {
		entry = &entries[i];

		/* Do not update unused entries */
		if (!entry->in_use)
			continue;
		/* Do not update already updated entries (i.e. fragments) */
		if (entry->in_hw)
			continue;
		/* Let last entry be updated last */
		if (entry->is_last)
			continue;
		/* Do not return fragments */
		if (entry->is_frag)
			continue;
		/* Check if we already checked this prio */
		if (entry->prio < curr_prio)
			continue;
		/* Check if this is the minimum prio */
		if (entry->prio < min_prio) {
			min_prio = entry->prio;
			min_prio_idx = i;
			found = true;
		}
	}

	if (found)
		return &entries[min_prio_idx];
	return NULL;
}

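/* Reprogram the whole Flexible RX Parser table: RX and the parser are
 * stopped, the entries are written in ascending priority order together with
 * their fragments, the final "all pass" entry is added last, and the number
 * of valid/parsable entries is updated before everything is re-enabled.
 */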
int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
		      unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + GMAC_CONFIG);
	val = old_val & ~GMAC_CONFIG_RE;
	writel(val, ioaddr + GMAC_CONFIG);

	/* Disable RX Parser */
	ret = dwmac5_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries in reverse order */
	while (1) {
		entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			entry->val.ok_index = nve + 2;
		}

		ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		if (frag && !frag->in_hw) {
			ret = dwmac5_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	if (!nve)
		goto re_enable;

	/* Update the "all pass" entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & NPE;
	val |= nve & NVE;
	writel(val, ioaddr + MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwmac5_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + GMAC_CONFIG);
	return ret;
}

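/* Configure flexible PPS output @index: program the start time, derive the
 * interval and (roughly 50% duty cycle) width registers from the requested
 * period and the current sub-second increment, then kick the command in
 * MAC_PPS_CONTROL.
 */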
int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
			   struct stmmac_pps_cfg *cfg, bool enable,
			   u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + MAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	if (tnsec & TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~PPSx_MASK(index);

	if (!enable) {
		val |= PPSCMDx(index, 0x5);
		val |= PPSEN0;
		writel(val, ioaddr + MAC_PPS_CONTROL);
		return 0;
	}

	val |= PPSCMDx(index, 0x2);
	val |= TRGTMODSELx(index, 0x2);
	val |= PPSEN0;

	writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));

	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index));

	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + MAC_PPSx_INTERVAL(index));

	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + MAC_PPS_CONTROL);
	return 0;
}

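/* Indirect EST write: @gcl selects the Gate Control List itself, otherwise
 * the GCL-related register addressed by @reg; poll until the start bit
 * (SRWO) self-clears.
 */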
static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
{
	u32 ctrl;

	writel(val, ioaddr + MTL_EST_GCL_DATA);

	ctrl = (reg << ADDR_SHIFT);
	ctrl |= gcl ? 0 : GCRR;

	writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);

	ctrl |= SRWO;
	writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);

	return readl_poll_timeout(ioaddr + MTL_EST_GCL_CONTROL,
				  ctrl, !(ctrl & SRWO), 100, 5000);
}

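/* Program the EST parameters (base time, cycle time, time extension, list
 * length) and the gate control list, set the PTP timeout value and finally
 * enable or disable EST and its interrupts.
 */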
int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
			 unsigned int ptp_rate)
{
	int i, ret = 0x0;
	u32 ctrl;

	ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
	ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
	ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
	ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
	ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
	ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
	if (ret)
		return ret;

	for (i = 0; i < cfg->gcl_size; i++) {
		ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
		if (ret)
			return ret;
	}

	ctrl = readl(ioaddr + MTL_EST_CONTROL);
	ctrl &= ~PTOV;
	ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
	if (cfg->enable)
		ctrl |= EEST | SSWL;
	else
		ctrl &= ~EEST;

	writel(ctrl, ioaddr + MTL_EST_CONTROL);

	/* Configure EST interrupt */
	if (cfg->enable)
		ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC);
	else
		ctrl = 0;

	writel(ctrl, ioaddr + MTL_EST_INT_EN);

	return 0;
}

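/* Handle the EST interrupt sources: gate control error (CGCE), head-of-line
 * blocking due to scheduling (HLBS) or frame size (HLBF), BTR error (BTRE)
 * and SWOL switch (SWLC). Each pending source is counted, cleared and logged
 * (rate-limited).
 */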
void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
			  struct stmmac_extra_stats *x, u32 txqcnt)
{
	u32 status, value, feqn, hbfq, hbfs, btrl;
	u32 txqcnt_mask = (1 << txqcnt) - 1;

	status = readl(ioaddr + MTL_EST_STATUS);

	value = (CGCE | HLBS | HLBF | BTRE | SWLC);

	/* Return if there is no error */
	if (!(status & value))
		return;

	if (status & CGCE) {
		/* Clear Interrupt */
		writel(CGCE, ioaddr + MTL_EST_STATUS);

		x->mtl_est_cgce++;
	}

	if (status & HLBS) {
		value = readl(ioaddr + MTL_EST_SCH_ERR);
		value &= txqcnt_mask;

		x->mtl_est_hlbs++;

		/* Clear Interrupt */
		writel(value, ioaddr + MTL_EST_SCH_ERR);

		/* Collect info to show all the queues that have the HLBS
		 * issue. The only way to clear this is to clear the
		 * statistic.
		 */
		if (net_ratelimit())
			netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value);
	}

	if (status & HLBF) {
		value = readl(ioaddr + MTL_EST_FRM_SZ_ERR);
		feqn = value & txqcnt_mask;

		value = readl(ioaddr + MTL_EST_FRM_SZ_CAP);
		hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT;
		hbfs = value & SZ_CAP_HBFS_MASK;

		x->mtl_est_hlbf++;

		/* Clear Interrupt */
		writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR);

		if (net_ratelimit())
			netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n",
				   hbfq, hbfs);
	}

	if (status & BTRE) {
		if ((status & BTRL) == BTRL_MAX)
			x->mtl_est_btrlm++;
		else
			x->mtl_est_btre++;

		btrl = (status & BTRL) >> BTRL_SHIFT;

		if (net_ratelimit())
			netdev_info(dev, "EST: BTR Error Loop Count %u\n",
				    btrl);

		writel(BTRE, ioaddr + MTL_EST_STATUS);
	}

	if (status & SWLC) {
		writel(SWLC, ioaddr + MTL_EST_STATUS);
		netdev_info(dev, "EST: SWOL has been switched\n");
	}
}

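/* Enable or disable Frame Preemption: when enabling, program the FPRQ field
 * of GMAC_RXQ_CTRL1 to the last RX queue (num_rxq - 1) before setting EFPE.
 */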
void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
			  bool enable)
{
	u32 value;

	if (!enable) {
		value = readl(ioaddr + MAC_FPE_CTRL_STS);

		value &= ~EFPE;

		writel(value, ioaddr + MAC_FPE_CTRL_STS);
		return;
	}

	value = readl(ioaddr + GMAC_RXQ_CTRL1);
	value &= ~GMAC_RXQCTRL_FPRQ;
	value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
	writel(value, ioaddr + GMAC_RXQ_CTRL1);

	value = readl(ioaddr + MAC_FPE_CTRL_STS);
	value |= EFPE;
	writel(value, ioaddr + MAC_FPE_CTRL_STS);
}

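/* Report which FPE mPacket events are pending in MAC_FPE_CTRL_STS: verify or
 * response mPacket transmitted (TVER/TRSP) or received (RVER/RRSP).
 */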
int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
{
	u32 value;
	int status;

	status = FPE_EVENT_UNKNOWN;

	value = readl(ioaddr + MAC_FPE_CTRL_STS);

	if (value & TRSP) {
		status |= FPE_EVENT_TRSP;
		netdev_info(dev, "FPE: Respond mPacket is transmitted\n");
	}

	if (value & TVER) {
		status |= FPE_EVENT_TVER;
		netdev_info(dev, "FPE: Verify mPacket is transmitted\n");
	}

	if (value & RRSP) {
		status |= FPE_EVENT_RRSP;
		netdev_info(dev, "FPE: Respond mPacket is received\n");
	}

	if (value & RVER) {
		status |= FPE_EVENT_RVER;
		netdev_info(dev, "FPE: Verify mPacket is received\n");
	}

	return status;
}

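/* Request transmission of a verify or response mPacket by setting SVER or
 * SRSP in MAC_FPE_CTRL_STS.
 */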
void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type)
{
	u32 value;

	value = readl(ioaddr + MAC_FPE_CTRL_STS);

	if (type == MPACKET_VERIFY) {
		value &= ~SRSP;
		value |= SVER;
	} else {
		value &= ~SVER;
		value |= SRSP;
	}

	writel(value, ioaddr + MAC_FPE_CTRL_STS);
}