// SPDX-License-Identifier: (GPL-2.0 OR MIT)
// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
// stmmac Support for 5.xx Ethernet QoS cores

#include <linux/bitops.h>
#include <linux/iopoll.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"
#include "stmmac_ptp.h"

struct dwmac5_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};

#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)

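/* Log every error bit set in @value and bump the matching counter inside
 * @stats. @field_offset selects the per-module counter array (see STAT_OFF),
 * so the counter index equals the bit position in the hardware status word.
 */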
static void dwmac5_log_error(struct net_device *ndev, u32 value, bool corr,
		const char *module_name, const struct dwmac5_error_desc *desc,
		unsigned long field_offset, struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
				"correctable" : "uncorrectable", module_name,
				desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}

static const struct dwmac5_error_desc dwmac5_mac_errors[32] = {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "RDPES", "Read Descriptor Parity Check Error" },
	{ true, "MPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ true, "T125ES", "TX125 FSM Timeout Error" },
	{ true, "R125ES", "RX125 FSM Timeout Error" },
	{ true, "RVCTES", "REV MDC FSM Timeout Error" },
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

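/* Read the combined MAC data path parity / FSM interrupt status, write the
 * value back to acknowledge the bits just read, and decode it against
 * dwmac5_mac_errors.
 */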
static void dwmac5_handle_mac_err(struct net_device *ndev,
		void __iomem *ioaddr, bool correctable,
		struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + MAC_DPP_FSM_INT_STATUS);
	writel(value, ioaddr + MAC_DPP_FSM_INT_STATUS);

	dwmac5_log_error(ndev, value, correctable, "MAC", dwmac5_mac_errors,
			STAT_OFF(mac_errors), stats);
}

static const struct dwmac5_error_desc dwmac5_mtl_errors[32] = {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwmac5_handle_mtl_err(struct net_device *ndev,
		void __iomem *ioaddr, bool correctable,
		struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + MTL_ECC_INT_STATUS);
	writel(value, ioaddr + MTL_ECC_INT_STATUS);

	dwmac5_log_error(ndev, value, correctable, "MTL", dwmac5_mtl_errors,
			STAT_OFF(mtl_errors), stats);
}

static const struct dwmac5_error_desc dwmac5_dma_errors[32] = {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 4 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 5 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 6 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwmac5_handle_dma_err(struct net_device *ndev,
		void __iomem *ioaddr, bool correctable,
		struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + DMA_ECC_INT_STATUS);
	writel(value, ioaddr + DMA_ECC_INT_STATUS);

	dwmac5_log_error(ndev, value, correctable, "DMA", dwmac5_dma_errors,
			STAT_OFF(dma_errors), stats);
}

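/* Program the safety features selected by @safety_feat_cfg (all of them when
 * the caller passes NULL). @asp is the Automotive Safety Package level read
 * from the HW feature registers: 0 means no safety features are present,
 * level 1 stops after the ECC and ECC interrupt setup, level 2 adds FSM
 * parity/timeout and data path parity, and level 3 additionally allows
 * parity on the external application interface (EPSI).
 */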
int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
			      struct stmmac_safety_feature_cfg *safety_feat_cfg)
{
	struct stmmac_safety_feature_cfg all_safety_feats = {
		.tsoee = 1,
		.mrxpee = 1,
		.mestee = 1,
		.mrxee = 1,
		.mtxee = 1,
		.epsi = 1,
		.edpp = 1,
		.prtyen = 1,
		.tmouten = 1,
	};
	u32 value;

	if (!asp)
		return -EINVAL;

	if (!safety_feat_cfg)
		safety_feat_cfg = &all_safety_feats;

	/* 1. Enable Safety Features */
	value = readl(ioaddr + MTL_ECC_CONTROL);
	value |= MEEAO; /* MTL ECC Error Addr Status Override */
	if (safety_feat_cfg->tsoee)
		value |= TSOEE; /* TSO ECC */
	if (safety_feat_cfg->mrxpee)
		value |= MRXPEE; /* MTL RX Parser ECC */
	if (safety_feat_cfg->mestee)
		value |= MESTEE; /* MTL EST ECC */
	if (safety_feat_cfg->mrxee)
		value |= MRXEE; /* MTL RX FIFO ECC */
	if (safety_feat_cfg->mtxee)
		value |= MTXEE; /* MTL TX FIFO ECC */
	writel(value, ioaddr + MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + MTL_ECC_INT_ENABLE);
	value |= RPCEIE; /* RX Parser Memory Correctable Error */
	value |= ECEIE; /* EST Memory Correctable Error */
	value |= RXCEIE; /* RX Memory Correctable Error */
	value |= TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + DMA_ECC_INT_ENABLE);
	value |= TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + DMA_ECC_INT_ENABLE);

	/* Only ECC Protection for External Memory feature is selected */
	if (asp <= 0x1)
		return 0;

	/* 5. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + MAC_FSM_CONTROL);
	if (safety_feat_cfg->prtyen)
		value |= PRTYEN; /* FSM Parity Feature */
	if (safety_feat_cfg->tmouten)
		value |= TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + MAC_FSM_CONTROL);

	/* 4. Enable Data Parity Protection */
	value = readl(ioaddr + MTL_DPP_CONTROL);
	if (safety_feat_cfg->edpp)
		value |= EDPP;
	writel(value, ioaddr + MTL_DPP_CONTROL);

	/*
	 * All the Automotive Safety features are selected without the "Parity
	 * Port Enable for external interface" feature.
	 */
	if (asp <= 0x2)
		return 0;

	if (safety_feat_cfg->epsi)
		value |= EPSI;
	writel(value, ioaddr + MTL_DPP_CONTROL);
	return 0;
}

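/* Top-level safety IRQ handler: dispatch the MAC (DPP/FSM), MTL ECC and DMA
 * ECC interrupt sources to the per-module handlers. The return value is
 * non-zero when a handled block was treated as uncorrectable (ret |= !corr).
 */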
int dwmac5_safety_feat_irq_status(struct net_device *ndev,
		void __iomem *ioaddr, unsigned int asp,
		struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + DMA_SAFETY_INT_STATUS);

	err = (mtl & MCSIS) || (dma & MCSIS);
	corr = false;
	if (err) {
		dwmac5_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = (mtl & (MEUIS | MECIS)) || (dma & (MSUIS | MSCIS));
	corr = (mtl & MECIS) || (dma & MSCIS);
	if (err) {
		dwmac5_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = dma & (DEUIS | DECIS);
	corr = dma & DECIS;
	if (err) {
		dwmac5_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}

static const struct dwmac5_error {
	const struct dwmac5_error_desc *desc;
} dwmac5_all_errors[] = {
	{ dwmac5_mac_errors },
	{ dwmac5_mtl_errors },
	{ dwmac5_dma_errors },
};

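/* Dump one safety counter: @index selects the module (index / 32: 0 = MAC,
 * 1 = MTL, 2 = DMA, matching dwmac5_all_errors) and the bit within it
 * (index % 32). The counters in stmmac_safety_stats are laid out as
 * consecutive 32-entry arrays, so @index also indexes the stats directly.
 */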
int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
			int index, unsigned long *count, const char **desc)
{
	int module = index / 32, offset = index % 32;
	unsigned long *ptr = (unsigned long *)stats;

	if (module >= ARRAY_SIZE(dwmac5_all_errors))
		return -EINVAL;
	if (!dwmac5_all_errors[module].desc[offset].valid)
		return -EINVAL;
	if (count)
		*count = *(ptr + index);
	if (desc)
		*desc = dwmac5_all_errors[module].desc[offset].desc;
	return 0;
}

static int dwmac5_rxp_disable(void __iomem *ioaddr)
{
	u32 val;

	val = readl(ioaddr + MTL_OPERATION_MODE);
	val &= ~MTL_FRPE;
	writel(val, ioaddr + MTL_OPERATION_MODE);

	return readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val,
			val & RXPI, 1, 10000);
}

static void dwmac5_rxp_enable(void __iomem *ioaddr)
{
	u32 val;

	val = readl(ioaddr + MTL_OPERATION_MODE);
	val |= MTL_FRPE;
	writel(val, ioaddr + MTL_OPERATION_MODE);
}

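/* Program one Flexible RX Parser entry at table position @pos by pushing each
 * 32-bit word of entry->val through the indirect access registers: wait for
 * STARTBUSY to clear, write the data word, then the word address, the write
 * opcode and STARTBUSY, and finally wait for the access to complete.
 */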
static int dwmac5_rxp_update_single_entry(void __iomem *ioaddr,
					  struct stmmac_tc_entry *entry,
					  int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
				val, !(val & STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & ADDR;
		writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);

		/* Write OP */
		val |= WRRDN;
		writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);

		/* Start Write */
		val |= STARTBUSY;
		writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
				val, !(val & STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}

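/* Pick the next entry to program: among the in-use, not-yet-programmed,
 * non-fragment, non-last entries, return the one with the lowest priority
 * that is still >= @curr_prio, so dwmac5_rxp_config() writes the table in
 * ascending priority order. Returns NULL when nothing is left to program.
 */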
static struct stmmac_tc_entry *
dwmac5_rxp_get_next_entry(struct stmmac_tc_entry *entries, unsigned int count,
			  u32 curr_prio)
{
	struct stmmac_tc_entry *entry;
	u32 min_prio = ~0x0;
	int i, min_prio_idx;
	bool found = false;

	for (i = count - 1; i >= 0; i--) {
		entry = &entries[i];

		/* Do not update unused entries */
		if (!entry->in_use)
			continue;
		/* Do not update already updated entries (i.e. fragments) */
		if (entry->in_hw)
			continue;
		/* Let last entry be updated last */
		if (entry->is_last)
			continue;
		/* Do not return fragments */
		if (entry->is_frag)
			continue;
		/* Check if we already checked this prio */
		if (entry->prio < curr_prio)
			continue;
		/* Check if this is the minimum prio */
		if (entry->prio < min_prio) {
			min_prio = entry->prio;
			min_prio_idx = i;
			found = true;
		}
	}

	if (found)
		return &entries[min_prio_idx];
	return NULL;
}

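/* Reprogram the whole Flexible RX Parser table. RX and the parser are kept
 * disabled while the table is rewritten; entries are programmed in ascending
 * priority order with each fragment placed right after its parent, the
 * terminating "last" entries are appended at the end, and NVE/NPE are set to
 * the number of entries actually programmed before the parser is re-enabled.
 */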
int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
		      unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + GMAC_CONFIG);
	val = old_val & ~GMAC_CONFIG_RE;
	writel(val, ioaddr + GMAC_CONFIG);

	/* Disable RX Parser */
	ret = dwmac5_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries in reverse order */
	while (1) {
		entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			entry->val.ok_index = nve + 2;
		}

		ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		if (frag && !frag->in_hw) {
			ret = dwmac5_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	if (!nve)
		goto re_enable;

	/* Update the "all pass" (last) entries */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & NPE;
	val |= nve & NVE;
	writel(val, ioaddr + MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwmac5_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + GMAC_CONFIG);
	return ret;
}

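/* Configure flexible PPS output @index. On disable, issue PPS command 0x5
 * (stop the pulse train, per the PPS command encoding). On enable, program
 * the first pulse time (converting the ns part to ~0.465 ns binary-rollover
 * units when TSCTRLSSR is not set), derive interval and width from
 * cfg->period expressed in units of @sub_second_inc with width set to half
 * the interval (~50% duty cycle), then issue command 0x2 (start pulse train).
 */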
int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
			   struct stmmac_pps_cfg *cfg, bool enable,
			   u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + MAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	if (tnsec & TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~PPSx_MASK(index);

	if (!enable) {
		val |= PPSCMDx(index, 0x5);
		val |= PPSEN0;
		writel(val, ioaddr + MAC_PPS_CONTROL);
		return 0;
	}

	val |= TRGTMODSELx(index, 0x2);
	val |= PPSEN0;
	writel(val, ioaddr + MAC_PPS_CONTROL);

	writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));

	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index));

	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + MAC_PPSx_INTERVAL(index));

	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	val |= PPSCMDx(index, 0x2);
	writel(val, ioaddr + MAC_PPS_CONTROL);
	return 0;
}

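/* Indirect write into the EST block: the value goes through MTL_EST_GCL_DATA
 * while MTL_EST_GCL_CONTROL carries the target address and the SRWO "start"
 * bit, which is polled until the hardware completes the access. @gcl selects
 * the Gate Control List itself; otherwise GCRR addresses the EST parameter
 * registers (BTR/CTR/TER/LLR).
 */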
static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
{
	u32 ctrl;

	writel(val, ioaddr + MTL_EST_GCL_DATA);

	ctrl = (reg << ADDR_SHIFT);
	ctrl |= gcl ? 0 : GCRR;

	writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);

	ctrl |= SRWO;
	writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);

	return readl_poll_timeout(ioaddr + MTL_EST_GCL_CONTROL,
				  ctrl, !(ctrl & SRWO), 100, 5000);
}

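/* Load a complete EST schedule: base time, cycle time, time extension and
 * list length first, then every GCL entry. PTOV is set to six PTP clock
 * periods expressed in ns, and SSWL asks the hardware to switch to the newly
 * written list when EST is enabled.
 */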
int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
			 unsigned int ptp_rate)
{
	int i, ret = 0x0;
	u32 ctrl;

	ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
	ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
	ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
	ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
	ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
	ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
	if (ret)
		return ret;

	for (i = 0; i < cfg->gcl_size; i++) {
		ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
		if (ret)
			return ret;
	}

	ctrl = readl(ioaddr + MTL_EST_CONTROL);
	ctrl &= ~PTOV;
	ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
	if (cfg->enable)
		ctrl |= EEST | SSWL;
	else
		ctrl &= ~EEST;

	writel(ctrl, ioaddr + MTL_EST_CONTROL);

	/* Configure EST interrupt */
	if (cfg->enable)
		ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC);
	else
		ctrl = 0;

	writel(ctrl, ioaddr + MTL_EST_INT_EN);

	return 0;
}

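/* Decode and acknowledge the EST interrupt sources: constant gate control
 * error (CGCE), head-of-line blocking due to scheduling (HLBS) or frame size
 * (HLBF), base time register error (BTRE) and switch to S/W owned list
 * complete (SWLC), updating the extra stats counters as they are seen.
 */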
void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
			  struct stmmac_extra_stats *x, u32 txqcnt)
{
	u32 status, value, feqn, hbfq, hbfs, btrl;
	u32 txqcnt_mask = (1 << txqcnt) - 1;

	status = readl(ioaddr + MTL_EST_STATUS);

	value = (CGCE | HLBS | HLBF | BTRE | SWLC);

	/* Return if there is no error */
	if (!(status & value))
		return;

	if (status & CGCE) {
		/* Clear Interrupt */
		writel(CGCE, ioaddr + MTL_EST_STATUS);

		x->mtl_est_cgce++;
	}

	if (status & HLBS) {
		value = readl(ioaddr + MTL_EST_SCH_ERR);
		value &= txqcnt_mask;

		x->mtl_est_hlbs++;

		/* Clear Interrupt */
		writel(value, ioaddr + MTL_EST_SCH_ERR);

		/* Collect info to show all the queues that have an HLBS
		 * issue. The only way to clear this is to clear the
		 * statistic.
		 */
		if (net_ratelimit())
			netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value);
	}

	if (status & HLBF) {
		value = readl(ioaddr + MTL_EST_FRM_SZ_ERR);
		feqn = value & txqcnt_mask;

		value = readl(ioaddr + MTL_EST_FRM_SZ_CAP);
		hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT;
		hbfs = value & SZ_CAP_HBFS_MASK;

		x->mtl_est_hlbf++;

		/* Clear Interrupt */
		writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR);

		if (net_ratelimit())
			netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n",
				   hbfq, hbfs);
	}

	if (status & BTRE) {
		if ((status & BTRL) == BTRL_MAX)
			x->mtl_est_btrlm++;
		else
			x->mtl_est_btre++;

		btrl = (status & BTRL) >> BTRL_SHIFT;

		if (net_ratelimit())
			netdev_info(dev, "EST: BTR Error Loop Count %u\n",
				    btrl);

		writel(BTRE, ioaddr + MTL_EST_STATUS);
	}

	if (status & SWLC) {
		writel(SWLC, ioaddr + MTL_EST_STATUS);
		netdev_info(dev, "EST: SWOL has been switched\n");
	}
}

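/* Enable or disable Frame Preemption. When enabling, program the Frame
 * Preemption Residue Queue (FPRQ) to the last RX queue and set EFPE; when
 * disabling, clear the cached control value. The programmed value is kept in
 * cfg->fpe_csr so dwmac5_fpe_send_mpacket() can OR the mPacket trigger bits
 * on top without clobbering EFPE.
 */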
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
			  u32 num_txq, u32 num_rxq,
			  bool enable)
{
	u32 value;

	if (enable) {
		cfg->fpe_csr = EFPE;
		value = readl(ioaddr + GMAC_RXQ_CTRL1);
		value &= ~GMAC_RXQCTRL_FPRQ;
		value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
		writel(value, ioaddr + GMAC_RXQ_CTRL1);
	} else {
		cfg->fpe_csr = 0;
	}
	writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
}

int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
{
	u32 value;
	int status;

	status = FPE_EVENT_UNKNOWN;

	/* Reads from the MAC_FPE_CTRL_STS register should only be performed
	 * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
	 */
	value = readl(ioaddr + MAC_FPE_CTRL_STS);

	if (value & TRSP) {
		status |= FPE_EVENT_TRSP;
		netdev_info(dev, "FPE: Respond mPacket is transmitted\n");
	}

	if (value & TVER) {
		status |= FPE_EVENT_TVER;
		netdev_info(dev, "FPE: Verify mPacket is transmitted\n");
	}

	if (value & RRSP) {
		status |= FPE_EVENT_RRSP;
		netdev_info(dev, "FPE: Respond mPacket is received\n");
	}

	if (value & RVER) {
		status |= FPE_EVENT_RVER;
		netdev_info(dev, "FPE: Verify mPacket is received\n");
	}

	return status;
}

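/* Trigger transmission of an FPE mPacket: set SVER (verify) or SRSP
 * (respond) on top of the cached control value so the current EFPE setting
 * is preserved.
 */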
void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
			     enum stmmac_mpacket_type type)
{
	u32 value = cfg->fpe_csr;

	if (type == MPACKET_VERIFY)
		value |= SVER;
	else if (type == MPACKET_RESPONSE)
		value |= SRSP;

	writel(value, ioaddr + MAC_FPE_CTRL_STS);
}