// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2018 Microsemi Corporation
 */

#include <log.h>
#include <linux/delay.h>
#include <linux/io.h>
#include "mscc_xfer.h"

#define QS_XTR_FLUSH_FLUSH		GENMASK(1, 0)
#define QS_INJ_CTRL_GAP_SIZE(x)		((x) << 21)
#define QS_INJ_CTRL_EOF			BIT(19)
#define QS_INJ_CTRL_SOF			BIT(18)
#define QS_INJ_CTRL_VLD_BYTES(x)	((x) << 16)

#define XTR_EOF_0     ntohl(0x80000000u)
#define XTR_EOF_1     ntohl(0x80000001u)
#define XTR_EOF_2     ntohl(0x80000002u)
#define XTR_EOF_3     ntohl(0x80000003u)
#define XTR_PRUNED    ntohl(0x80000004u)
#define XTR_ABORT     ntohl(0x80000005u)
#define XTR_ESCAPE    ntohl(0x80000006u)
#define XTR_NOT_READY ntohl(0x80000007u)

#define BUF_CELL_SZ		60
#define XTR_VALID_BYTES(x)	(4 - ((x) & 3))

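/*
 * mscc_send() - inject one frame through the queue system (QS) registers.
 *
 * The register writes below implement the injection sequence: a
 * start-of-frame control word, the injection frame header (IFH) words,
 * the payload words, zero padding up to the 60-byte minimum cell,
 * an end-of-frame control word carrying the number of valid bytes in
 * the last data word, and finally a dummy word in place of the CRC/FCS.
 */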
int mscc_send(void __iomem *regs, const unsigned long *mscc_qs_offset,
	      u32 *ifh, size_t ifh_len, u32 *buff, size_t buff_len)
{
	int i, count = (buff_len + 3) / 4, last = buff_len % 4;

	writel(QS_INJ_CTRL_GAP_SIZE(1) | QS_INJ_CTRL_SOF,
	       regs + mscc_qs_offset[MSCC_QS_INJ_CTRL]);

	for (i = 0; i < ifh_len; i++)
		writel(ifh[i], regs + mscc_qs_offset[MSCC_QS_INJ_WR]);

	for (i = 0; i < count; i++)
		writel(buff[i], regs + mscc_qs_offset[MSCC_QS_INJ_WR]);

	/* Add padding */
	while (i < (BUF_CELL_SZ / 4)) {
		writel(0, regs + mscc_qs_offset[MSCC_QS_INJ_WR]);
		i++;
	}

	/* Indicate EOF and valid bytes in last word */
	writel(QS_INJ_CTRL_GAP_SIZE(1) |
	       QS_INJ_CTRL_VLD_BYTES(buff_len < BUF_CELL_SZ ? 0 : last) |
	       QS_INJ_CTRL_EOF, regs + mscc_qs_offset[MSCC_QS_INJ_CTRL]);

	/* Add dummy CRC */
	writel(0, regs + mscc_qs_offset[MSCC_QS_INJ_WR]);

	return 0;
}

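/*
 * mscc_recv() - extract one frame from CPU queue group 0.
 *
 * Returns -EAGAIN when no frame is pending or when the frame was
 * aborted or pruned by the hardware, otherwise the number of payload
 * bytes copied into rxbuf.  The extraction frame header (ifh_len words)
 * is read and discarded.  Special 32-bit marker words (EOF, ABORT,
 * PRUNED, ESCAPE, NOT_READY) are interpreted inline; byte_swap controls
 * whether each word is passed through ntohl() before being compared
 * against those markers.
 */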
int mscc_recv(void __iomem *regs, const unsigned long *mscc_qs_offset,
	      u32 *rxbuf, size_t ifh_len, bool byte_swap)
{
	u8 grp = 0; /* Recv everything on CPU group 0 */
	int i, byte_cnt = 0;
	bool eof_flag = false, pruned_flag = false, abort_flag = false;

	if (!(readl(regs + mscc_qs_offset[MSCC_QS_XTR_DATA_PRESENT]) &
	      BIT(grp)))
		return -EAGAIN;

	/* skip IFH */
	for (i = 0; i < ifh_len; i++)
		readl(regs + mscc_qs_offset[MSCC_QS_XTR_RD]);

	while (!eof_flag) {
		u32 val = readl(regs + mscc_qs_offset[MSCC_QS_XTR_RD]);
		u32 cmp = val;

		if (byte_swap)
			cmp = ntohl(val);

		switch (cmp) {
		case XTR_NOT_READY:
			debug("%d NOT_READY...?\n", byte_cnt);
			break;
		case XTR_ABORT:
			*rxbuf = readl(regs + mscc_qs_offset[MSCC_QS_XTR_RD]);
			abort_flag = true;
			eof_flag = true;
			debug("XTR_ABORT\n");
			break;
		case XTR_EOF_0:
		case XTR_EOF_1:
		case XTR_EOF_2:
		case XTR_EOF_3:
			byte_cnt += XTR_VALID_BYTES(val);
			*rxbuf = readl(regs + mscc_qs_offset[MSCC_QS_XTR_RD]);
			eof_flag = true;
			debug("EOF\n");
			break;
		case XTR_PRUNED:
			/* But get the last 4 bytes as well */
			eof_flag = true;
			pruned_flag = true;
			debug("PRUNED\n");
			/* fallthrough */
		case XTR_ESCAPE:
			*rxbuf = readl(regs + mscc_qs_offset[MSCC_QS_XTR_RD]);
			byte_cnt += 4;
			rxbuf++;
			debug("ESCAPED\n");
			break;
		default:
			*rxbuf = val;
			byte_cnt += 4;
			rxbuf++;
		}
	}

	if (abort_flag || pruned_flag || !eof_flag) {
		debug("Discarded frame: abort:%d pruned:%d eof:%d\n",
		      abort_flag, pruned_flag, eof_flag);
		return -EAGAIN;
	}

	return byte_cnt;
}

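/*
 * mscc_flush() - flush all extraction queues.
 *
 * Asserts the flush bits for every queue, waits 1 ms for them to
 * drain, then returns the queues to normal operation.
 */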
void mscc_flush(void __iomem *regs, const unsigned long *mscc_qs_offset)
{
	/* All Queues flush */
	setbits_le32(regs + mscc_qs_offset[MSCC_QS_XTR_FLUSH],
		     QS_XTR_FLUSH_FLUSH);

	/* Allow to drain */
	mdelay(1);

	/* All Queues normal */
	clrbits_le32(regs + mscc_qs_offset[MSCC_QS_XTR_FLUSH],
		     QS_XTR_FLUSH_FLUSH);
}
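
/*
 * Usage sketch (illustrative only): a MAC driver is expected to pass its
 * register base and a per-SoC table of queue-system register offsets,
 * indexed by the MSCC_QS_* values from mscc_xfer.h.  The names priv,
 * qs_offsets, IFH_WORDS, ifh, packet and rxbuf below are hypothetical
 * placeholders for that driver's own data, not identifiers defined here:
 *
 *	mscc_send(priv->regs, priv->qs_offsets, ifh, IFH_WORDS,
 *		  (u32 *)packet, length);
 *
 *	ret = mscc_recv(priv->regs, priv->qs_offsets, (u32 *)rxbuf,
 *			IFH_WORDS, false);
 *	if (ret == -EAGAIN)
 *		return 0;	(no frame pending yet)
 */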