// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */

#include "enetc.h"

#include <net/pkt_sched.h>
#include <linux/math64.h>

static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
	return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
		& ENETC_QBV_MAX_GCL_LEN_MASK;
}

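/* Keep the port MAC PSPEED field in sync with the PHY link speed; the
 * Qbv gate control timing depends on it.  The speed is also cached in
 * priv->speed for the CBS rate calculations below.
 */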
void enetc_sched_speed_set(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u32 old_speed = priv->speed;
	u32 speed, pspeed;

	if (phydev->speed == old_speed)
		return;

	speed = phydev->speed;
	switch (speed) {
	case SPEED_1000:
		pspeed = ENETC_PMR_PSPEED_1000M;
		break;
	case SPEED_2500:
		pspeed = ENETC_PMR_PSPEED_2500M;
		break;
	case SPEED_100:
		pspeed = ENETC_PMR_PSPEED_100M;
		break;
	case SPEED_10:
	default:
		pspeed = ENETC_PMR_PSPEED_10M;
	}

	priv->speed = speed;
	enetc_port_wr(&priv->si->hw, ENETC_PMR,
		      (enetc_port_rd(&priv->si->hw, ENETC_PMR)
		      & (~ENETC_PMR_PSPEED_MASK))
		      | pspeed);
}

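/* A Qbv schedule typically reaches this driver via taprio full offload.
 * Illustrative command (interface name and schedule values are
 * assumptions, not mandated by the hardware):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *	num_tc 8 map 0 1 2 3 4 5 6 7 \
 *	queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
 *	base-time 1000000000 \
 *	sched-entry S 0x7f 900000 sched-entry S 0x80 100000 \
 *	flags 0x2
 *
 * flags 0x2 requests full offload, so the schedule arrives here as a
 * struct tc_taprio_qopt_offload.
 */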
static int enetc_setup_taprio(struct net_device *ndev,
			      struct tc_taprio_qopt_offload *admin_conf)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_cbd cbd = {.cmd = 0};
	struct tgs_gcl_conf *gcl_config;
	struct tgs_gcl_data *gcl_data;
	struct gce *gce;
	dma_addr_t dma;
	u16 data_size;
	u16 gcl_len;
	u32 tge;
	int err;
	int i;

	if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
		return -EINVAL;
	gcl_len = admin_conf->num_entries;

	tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
	if (!admin_conf->enable) {
		enetc_wr(&priv->si->hw,
			 ENETC_QBV_PTGCR_OFFSET,
			 tge & (~ENETC_QBV_TGE));
		return 0;
	}

	if (admin_conf->cycle_time > U32_MAX ||
	    admin_conf->cycle_time_extension > U32_MAX)
		return -EINVAL;

	/* Configure the (administrative) gate control list using the
	 * control buffer descriptor (BD).
	 */
	gcl_config = &cbd.gcl_conf;

	data_size = struct_size(gcl_data, entry, gcl_len);
	gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
	if (!gcl_data)
		return -ENOMEM;

	/* The gate control entries follow right after the GCL header */
	gce = (struct gce *)(gcl_data + 1);

	/* Set all gates open by default */
	gcl_config->atc = 0xff;
	gcl_config->acl_len = cpu_to_le16(gcl_len);

	if (!admin_conf->base_time) {
		gcl_data->btl =
			cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
		gcl_data->bth =
			cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
	} else {
		gcl_data->btl =
			cpu_to_le32(lower_32_bits(admin_conf->base_time));
		gcl_data->bth =
			cpu_to_le32(upper_32_bits(admin_conf->base_time));
	}

	gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
	gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);

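	/* Each taprio schedule entry maps to one hardware gate control
	 * entry.  Note the gate field is only 8 bits wide, so gate_mask
	 * is truncated to the lowest eight traffic classes.
	 */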
	for (i = 0; i < gcl_len; i++) {
		struct tc_taprio_sched_entry *temp_entry;
		struct gce *temp_gce = gce + i;

		temp_entry = &admin_conf->entries[i];

		temp_gce->gate = (u8)temp_entry->gate_mask;
		temp_gce->period = cpu_to_le32(temp_entry->interval);
	}

	cbd.length = cpu_to_le16(data_size);
	cbd.status_flags = 0;

	dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
			     data_size, DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
		netdev_err(priv->si->ndev, "DMA mapping failed!\n");
		kfree(gcl_data);
		return -ENOMEM;
	}

	cbd.addr[0] = lower_32_bits(dma);
	cbd.addr[1] = upper_32_bits(dma);
	cbd.cls = BDCR_CMD_PORT_GCL;

	enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
		 tge | ENETC_QBV_TGE);

	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		enetc_wr(&priv->si->hw,
			 ENETC_QBV_PTGCR_OFFSET,
			 tge & (~ENETC_QBV_TGE));

	dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
	kfree(gcl_data);

	return err;
}

int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
	struct tc_taprio_qopt_offload *taprio = type_data;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err;
	int i;

	/* TSD and Qbv are mutually exclusive in hardware */
	for (i = 0; i < priv->num_tx_rings; i++)
		if (priv->tx_ring[i]->tsd_enable)
			return -EBUSY;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_set_bdr_prio(&priv->si->hw,
				   priv->tx_ring[i]->index,
				   taprio->enable ? i : 0);

	err = enetc_setup_taprio(ndev, taprio);

	if (err)
		for (i = 0; i < priv->num_tx_rings; i++)
			enetc_set_bdr_prio(&priv->si->hw,
					   priv->tx_ring[i]->index,
					   taprio->enable ? 0 : i);

	return err;
}

static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
{
	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
}

static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
{
	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
}

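/* CBS parameters typically arrive via the cbs qdisc with hardware
 * offload enabled.  Illustrative commands (interface name and slope
 * values are assumptions; idleslope - sendslope must equal the link
 * speed in kbit/s, 1000 Mb/s here):
 *
 *   tc qdisc add dev eth0 root handle 1: mqprio num_tc 8 \
 *	map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 hw 1
 *   tc qdisc replace dev eth0 parent 1:8 cbs idleslope 500000 \
 *	sendslope -500000 hicredit 12 locredit -113 offload 1
 *
 * With offload 1 the parameters reach this driver as a
 * struct tc_cbs_qopt_offload.
 */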
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_cbs_qopt_offload *cbs = type_data;
	u32 port_transmit_rate = priv->speed;
	u8 tc_nums = netdev_get_num_tc(ndev);
	struct enetc_si *si = priv->si;
	u32 hi_credit_bit, hi_credit_reg;
	u32 max_interference_size;
	u32 port_frame_max_size;
	u8 tc = cbs->queue;
	u8 prio_top, prio_next;
	int bw_sum = 0;
	u8 bw;

	prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
	prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);

	/* CBS is supported only on the two highest-priority TCs */
	if (tc != prio_top && tc != prio_next)
		return -EOPNOTSUPP;

	if (!cbs->enable) {
		/* Make sure the TC that is numerically lower than this
		 * TC has been disabled first.
		 */
		if (tc == prio_top &&
		    enetc_get_cbs_enable(&si->hw, prio_next)) {
			dev_err(&ndev->dev,
				"Disable TC%d before disabling TC%d\n",
				prio_next, tc);
			return -EINVAL;
		}

		enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
		enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);

		return 0;
	}

	if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
	    cbs->idleslope < 0 || cbs->sendslope > 0)
		return -EOPNOTSUPP;

	port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	bw = cbs->idleslope / (port_transmit_rate * 10UL);
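	/* Illustrative numbers (assumed): an idleslope of 500000 kbit/s
	 * on a 1000 Mb/s link gives bw = 500000 / (1000 * 10) = 50,
	 * i.e. 50% of line rate.
	 */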

	/* Make sure the TC that is numerically higher than this TC
	 * has been enabled first.
	 */
	if (tc == prio_next) {
		if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
			dev_err(&ndev->dev,
				"Enable TC%d before enabling TC%d\n",
				prio_top, prio_next);
			return -EINVAL;
		}
		bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
	}

	if (bw_sum + bw >= 100) {
		dev_err(&ndev->dev,
			"The sum of all CBS bandwidths must be under 100\n");
		return -EINVAL;
	}

	/* For the top prio TC, max_interference_size is maxSizedFrame.
	 *
	 * For the next prio TC, max_interference_size is calculated as:
	 *
	 *	max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
	 *
	 *	- Ra: idleSlope for AVB Class A
	 *	- R0: port transmit rate
	 *	- M0: maximum sized frame for the port
	 *	- Ma: maximum sized frame for AVB Class A
	 */
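	/* Worked example with assumed numbers: R0 = 1000000000 b/s and
	 * 1522 byte frames on both the port and Class A give
	 * M0 = Ma = 12176 bits.  If Class A holds 50% of the bandwidth,
	 * Ra = 500000000 b/s, so:
	 *
	 *	max_interference_size = 12176 + 12176
	 *		+ 500000000 * 12176 / (1000000000 - 500000000)
	 *		= 36528 bits
	 */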

	if (tc == prio_top) {
		max_interference_size = port_frame_max_size * 8;
	} else {
		u32 m0, ma, r0, ra;

		m0 = port_frame_max_size * 8;
		ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
		ra = enetc_get_cbs_bw(&si->hw, prio_top) *
			port_transmit_rate * 10000ULL;
		r0 = port_transmit_rate * 1000000ULL;
		max_interference_size = m0 + ma +
			(u32)div_u64((u64)ra * m0, r0 - ra);
	}

	/* hiCredit bits are calculated as:
	 *
	 *	max_interference_size * (idleSlope / portTxRate)
	 */
	hi_credit_bit = max_interference_size * bw / 100;

	/* The hiCredit register value is the hiCredit bits scaled by:
	 *
	 *	(enetClockFrequency / portTransmitRate) * 100
	 */
	hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
				     port_transmit_rate * 1000000ULL);
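	/* Continuing the worked example above (ENETC_CLK assumed to be
	 * 400 MHz): hi_credit_bit = 36528 * 50 / 100 = 18264, and
	 * hi_credit_reg = 400000000 * 100 * 18264 / 1000000000 = 730560.
	 */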

	enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);

	/* Set bw register and enable this traffic class */
	enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);

	return 0;
}

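/* ETF (time-based send / TSD) offload is typically requested as
 * follows; the interface name and delta are illustrative assumptions:
 *
 *   tc qdisc replace dev eth0 parent 1:8 etf clockid CLOCK_TAI \
 *	delta 300000 offload
 *
 * The offload flag lands here as a struct tc_etf_qopt_offload and
 * enables time-specific departure on the matching Tx ring.
 */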
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_etf_qopt_offload *qopt = type_data;
	u8 tc_nums = netdev_get_num_tc(ndev);
	int tc;

	if (!tc_nums)
		return -EOPNOTSUPP;

	tc = qopt->queue;

	if (tc < 0 || tc >= priv->num_tx_rings)
		return -EINVAL;

	/* TXSTART and Tx checksum offload are not supported simultaneously */
	if (ndev->features & NETIF_F_CSUM_MASK)
		return -EBUSY;

	/* TSD and Qbv are mutually exclusive in hardware */
	if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
		return -EBUSY;

	priv->tx_ring[tc]->tsd_enable = qopt->enable;
	enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
		      qopt->enable ? ENETC_TSDE : 0);

	return 0;
}