// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_main.c: Main file for aQuantia Linux driver. */

#include "aq_main.h"
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_hw_utils.h"
#include "aq_vec.h"

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <linux/filter.h>

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);

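/* Static key toggled in aq_xdp_setup() below: it is incremented when an XDP
 * program is attached and decremented when it is removed, so other parts of
 * the driver can cheaply check whether the XDP transmit path needs extra
 * locking.
 */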
DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
EXPORT_SYMBOL(aq_xdp_locking_key);

static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;

static const struct net_device_ops aq_ndev_ops;

static struct workqueue_struct *aq_ndev_wq;

void aq_ndev_schedule_work(struct work_struct *work)
{
	queue_work(aq_ndev_wq, work);
}

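/* Allocate a multi-queue net_device whose private area is struct aq_nic_s
 * and hook up the netdev and ethtool operation tables.
 */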
struct net_device *aq_ndev_alloc(void)
{
	struct net_device *ndev = NULL;
	struct aq_nic_s *aq_nic = NULL;

	ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_HW_QUEUES_MAX);
	if (!ndev)
		return NULL;

	aq_nic = netdev_priv(ndev);
	aq_nic->ndev = ndev;
	ndev->netdev_ops = &aq_ndev_ops;
	ndev->ethtool_ops = &aq_ethtool_ops;

	return ndev;
}

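/* ndo_open: initialize the NIC, re-apply the stored RX flow and VLAN filter
 * rules, then start the datapath. On failure the NIC is deinitialized before
 * the error is returned.
 */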
static int aq_ndev_open(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	int err = 0;

	err = aq_nic_init(aq_nic);
	if (err < 0)
		goto err_exit;

	err = aq_reapply_rxnfc_all_rules(aq_nic);
	if (err < 0)
		goto err_exit;

	err = aq_filters_vlans_update(aq_nic);
	if (err < 0)
		goto err_exit;

	err = aq_nic_start(aq_nic);
	if (err < 0) {
		aq_nic_stop(aq_nic);
		goto err_exit;
	}

err_exit:
	if (err < 0)
		aq_nic_deinit(aq_nic, true);

	return err;
}

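/* ndo_stop: stop the datapath and release NIC resources. */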
static int aq_ndev_close(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	int err = 0;

	err = aq_nic_stop(aq_nic);
	if (err < 0)
		goto err_exit;
	aq_nic_deinit(aq_nic, true);

err_exit:
	return err;
}

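/* ndo_start_xmit: when the PTP datapath is up, PTP event frames (and, per the
 * limitation described below, general UDP port 320 messages) are diverted to
 * the dedicated PTP ring; everything else takes the regular aq_nic_xmit()
 * path.
 */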
static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) {
		/* Hardware adds the Timestamp for PTPv2 802.1AS
		 * and PTPv2 IPv4 UDP.
		 * We have to push even general 320 port messages to the ptp
		 * queue explicitly. This is a limitation of current firmware
		 * and hardware PTP design of the chip. Otherwise the ptp
		 * stream will fail to sync.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
		    unlikely((ip_hdr(skb)->version == 4) &&
			     (ip_hdr(skb)->protocol == IPPROTO_UDP) &&
			     ((udp_hdr(skb)->dest == htons(319)) ||
			      (udp_hdr(skb)->dest == htons(320)))) ||
		    unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588)))
			return aq_ptp_xmit(aq_nic, skb);
	}
#endif

	skb_tx_timestamp(skb);
	return aq_nic_xmit(aq_nic, skb);
}

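/* ndo_change_mtu: reject MTUs that no longer fit a single RX buffer while a
 * single-buffer (non-frags) XDP program is attached; otherwise program the
 * new frame size into hardware and update ndev->mtu.
 */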
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
	int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct bpf_prog *prog;
	int err;

	prog = READ_ONCE(aq_nic->xdp_prog);
	if (prog && !prog->aux->xdp_has_frags &&
	    new_frame_size > AQ_CFG_RX_FRAME_MAX) {
		netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
			   new_mtu);
		return -EOPNOTSUPP;
	}

	err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);

	if (err < 0)
		goto err_exit;
	ndev->mtu = new_mtu;

err_exit:
	return err;
}

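/* ndo_set_features: propagate offload changes (ntuple and VLAN filtering,
 * LRO, RX checksum, VLAN strip/insert) to the hardware, restarting the
 * interface when the change requires it.
 */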
static int aq_ndev_set_features(struct net_device *ndev,
				netdev_features_t features)
{
	bool is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX);
	bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	bool need_ndev_restart = false;
	struct aq_nic_cfg_s *aq_cfg;
	bool is_lro = false;
	int err = 0;

	aq_cfg = aq_nic_get_cfg(aq_nic);

	if (!(features & NETIF_F_NTUPLE)) {
		if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
			err = aq_clear_rxnfc_all_rules(aq_nic);
			if (unlikely(err))
				goto err_exit;
		}
	}
	if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			err = aq_filters_vlan_offload_off(aq_nic);
			if (unlikely(err))
				goto err_exit;
		}
	}

	aq_cfg->features = features;

	if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
		is_lro = features & NETIF_F_LRO;

		if (aq_cfg->is_lro != is_lro) {
			aq_cfg->is_lro = is_lro;
			need_ndev_restart = true;
		}
	}

	if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM) {
		err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
							aq_cfg);

		if (unlikely(err))
			goto err_exit;
	}

	if (aq_cfg->is_vlan_rx_strip != is_vlan_rx_strip) {
		aq_cfg->is_vlan_rx_strip = is_vlan_rx_strip;
		need_ndev_restart = true;
	}
	if (aq_cfg->is_vlan_tx_insert != is_vlan_tx_insert) {
		aq_cfg->is_vlan_tx_insert = is_vlan_tx_insert;
		need_ndev_restart = true;
	}

	if (need_ndev_restart && netif_running(ndev)) {
		aq_ndev_close(ndev);
		aq_ndev_open(ndev);
	}

err_exit:
	return err;
}

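/* ndo_fix_features: LRO requires RX checksum offload and cannot be used with
 * a single-buffer XDP program, so drop it from the requested feature set in
 * those cases.
 */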
static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
					      netdev_features_t features)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct bpf_prog *prog;

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	prog = READ_ONCE(aq_nic->xdp_prog);
	if (prog && !prog->aux->xdp_has_frags &&
	    aq_nic->xdp_prog && features & NETIF_F_LRO) {
		netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n");
		features &= ~NETIF_F_LRO;
	}

	return features;
}

static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	int err = 0;

	err = eth_mac_addr(ndev, addr);
	if (err < 0)
		goto err_exit;
	err = aq_nic_set_mac(aq_nic, ndev);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

static void aq_ndev_set_multicast_settings(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	(void)aq_nic_set_multicast_list(aq_nic, ndev);
}

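/* Validate a hardware timestamping request and collapse the supported PTPv2
 * RX filters into HWTSTAMP_FILTER_PTP_V2_EVENT before passing the config to
 * aq_ptp_hwtstamp_config_set().
 */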
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
				   struct hwtstamp_config *config)
{
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_NONE:
		break;
	default:
		return -ERANGE;
	}

	return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
}
#endif

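/* SIOCSHWTSTAMP handler: copy the config from user space, apply it when PTP
 * support is compiled in, and return the (possibly adjusted) config.
 */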
static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
{
	struct hwtstamp_config config;
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	int ret_val;
#endif

	if (!aq_nic->aq_ptp)
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
	if (ret_val)
		return ret_val;
#endif

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
{
	struct hwtstamp_config config;

	if (!aq_nic->aq_ptp)
		return -EOPNOTSUPP;

	aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config);
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
#endif

static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct aq_nic_s *aq_nic = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return aq_ndev_hwtstamp_set(aq_nic, ifr);

#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	case SIOCGHWTSTAMP:
		return aq_ndev_hwtstamp_get(aq_nic, ifr);
#endif
	}

	return -EOPNOTSUPP;
}

static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
				  u16 vid)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
		return -EOPNOTSUPP;

	set_bit(vid, aq_nic->active_vlans);

	return aq_filters_vlans_update(aq_nic);
}

static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
				   u16 vid)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
		return -EOPNOTSUPP;

	clear_bit(vid, aq_nic->active_vlans);

	if (-ENOENT == aq_del_fvlan_by_vlan(aq_nic, vid))
		return aq_filters_vlans_update(aq_nic);

	return 0;
}

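/* Sanity-check an mqprio offload request: the TC count must not exceed what
 * the hardware supports and must be a power of two; min TX rate shaping is
 * only accepted on chips with the ANTIGUA feature.
 */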
static int aq_validate_mqprio_opt(struct aq_nic_s *self,
				  struct tc_mqprio_qopt_offload *mqprio,
				  const unsigned int num_tc)
{
	const bool has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(self);
	const unsigned int tcs_max = min_t(u8, aq_nic_cfg->aq_hw_caps->tcs_max,
					   AQ_CFG_TCS_MAX);

	if (num_tc > tcs_max) {
		netdev_err(self->ndev, "Too many TCs requested\n");
		return -EOPNOTSUPP;
	}

	if (num_tc != 0 && !is_power_of_2(num_tc)) {
		netdev_err(self->ndev, "TC count should be power of 2\n");
		return -EOPNOTSUPP;
	}

	if (has_min_rate && !ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
		netdev_err(self->ndev, "Min tx rate is not supported\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

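/* ndo_setup_tc: only mqprio offload is handled. Per-TC max/min rates are
 * scaled down by AQ_MBPS_DIVISOR before being programmed, then the
 * priority-to-TC mapping is applied via aq_nic_setup_tc_mqprio().
 */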
static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			   void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct aq_nic_s *aq_nic = netdev_priv(dev);
	bool has_min_rate;
	bool has_max_rate;
	int err;
	int i;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);

	err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc);
	if (err)
		return err;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		if (has_max_rate) {
			u64 max_rate = mqprio->max_rate[i];

			do_div(max_rate, AQ_MBPS_DIVISOR);
			aq_nic_setup_tc_max_rate(aq_nic, i, (u32)max_rate);
		}

		if (has_min_rate) {
			u64 min_rate = mqprio->min_rate[i];

			do_div(min_rate, AQ_MBPS_DIVISOR);
			aq_nic_setup_tc_min_rate(aq_nic, i, (u32)min_rate);
		}
	}

	return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc,
				      mqprio->qopt.prio_tc_map);
}

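/* Attach or detach an XDP program. A program without frags support is
 * refused when the MTU exceeds a single RX frame, and LRO is turned off for
 * it. The interface is restarted when toggling between "program attached"
 * and "no program", and aq_xdp_locking_key tracks whether a program is
 * active.
 */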
static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	bool need_update, running = netif_running(ndev);
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct bpf_prog *old_prog;

	if (prog && !prog->aux->xdp_has_frags) {
		if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
			NL_SET_ERR_MSG_MOD(extack,
					   "prog does not support XDP frags");
			return -EOPNOTSUPP;
		}

		if (prog && ndev->features & NETIF_F_LRO) {
			netdev_err(ndev,
				   "LRO is not supported with single buffer XDP, disabling\n");
			ndev->features &= ~NETIF_F_LRO;
		}
	}

	need_update = !!aq_nic->xdp_prog != !!prog;
	if (running && need_update)
		aq_ndev_close(ndev);

	old_prog = xchg(&aq_nic->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (!old_prog && prog)
		static_branch_inc(&aq_xdp_locking_key);
	else if (old_prog && !prog)
		static_branch_dec(&aq_xdp_locking_key);

	if (running && need_update)
		return aq_ndev_open(ndev);

	return 0;
}

static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return aq_xdp_setup(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops aq_ndev_ops = {
	.ndo_open = aq_ndev_open,
	.ndo_stop = aq_ndev_close,
	.ndo_start_xmit = aq_ndev_start_xmit,
	.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
	.ndo_change_mtu = aq_ndev_change_mtu,
	.ndo_set_mac_address = aq_ndev_set_mac_address,
	.ndo_set_features = aq_ndev_set_features,
	.ndo_fix_features = aq_ndev_fix_features,
	.ndo_eth_ioctl = aq_ndev_ioctl,
	.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc = aq_ndo_setup_tc,
	.ndo_bpf = aq_xdp,
	.ndo_xdp_xmit = aq_xdp_xmit,
};

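/* Module init/exit: create the single-threaded workqueue used by
 * aq_ndev_schedule_work() and register/unregister the PCI driver.
 */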
static int __init aq_ndev_init_module(void)
{
	int ret;

	aq_ndev_wq = create_singlethread_workqueue(aq_ndev_driver_name);
	if (!aq_ndev_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	ret = aq_pci_func_register_driver();
	if (ret) {
		destroy_workqueue(aq_ndev_wq);
		return ret;
	}

	return 0;
}

static void __exit aq_ndev_exit_module(void)
{
	aq_pci_func_unregister_driver();

	if (aq_ndev_wq) {
		destroy_workqueue(aq_ndev_wq);
		aq_ndev_wq = NULL;
	}
}

module_init(aq_ndev_init_module);
module_exit(aq_ndev_exit_module);