1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Huawei HiNIC PCI Express Linux driver
4  * Copyright(c) 2017 Huawei Technologies Co., Ltd
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/moduleparam.h>
10 #include <linux/pci.h>
11 #include <linux/device.h>
12 #include <linux/errno.h>
13 #include <linux/types.h>
14 #include <linux/etherdevice.h>
15 #include <linux/netdevice.h>
16 #include <linux/slab.h>
17 #include <linux/if_vlan.h>
18 #include <linux/semaphore.h>
19 #include <linux/workqueue.h>
20 #include <net/ip.h>
21 #include <net/devlink.h>
22 #include <linux/bitops.h>
23 #include <linux/bitmap.h>
24 #include <linux/delay.h>
25 #include <linux/err.h>
26 
27 #include "hinic_hw_qp.h"
28 #include "hinic_hw_dev.h"
29 #include "hinic_devlink.h"
30 #include "hinic_port.h"
31 #include "hinic_tx.h"
32 #include "hinic_rx.h"
33 #include "hinic_dev.h"
34 #include "hinic_sriov.h"
35 
36 MODULE_AUTHOR("Huawei Technologies CO., Ltd");
37 MODULE_DESCRIPTION("Huawei Intelligent NIC driver");
38 MODULE_LICENSE("GPL");
39 
40 static unsigned int tx_weight = 64;
41 module_param(tx_weight, uint, 0644);
42 MODULE_PARM_DESC(tx_weight, "Number of Tx packets for NAPI budget (default=64)");
43 
44 static unsigned int rx_weight = 64;
45 module_param(rx_weight, uint, 0644);
46 MODULE_PARM_DESC(rx_weight, "Number of Rx packets for NAPI budget (default=64)");
47 
48 #define HINIC_DEV_ID_QUAD_PORT_25GE         0x1822
49 #define HINIC_DEV_ID_DUAL_PORT_100GE        0x0200
50 #define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ   0x0205
51 #define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ    0x0210
52 #define HINIC_DEV_ID_VF    0x375e
53 
54 #define HINIC_WQ_NAME                   "hinic_dev"
55 
56 #define MSG_ENABLE_DEFAULT              (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
57 					 NETIF_MSG_IFUP |                  \
58 					 NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
59 
60 #define HINIC_LRO_MAX_WQE_NUM_DEFAULT	8
61 
62 #define HINIC_LRO_RX_TIMER_DEFAULT	16
63 
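/* Size in bytes of the per-device VLAN bitmap: one bit for each possible VLAN ID */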
64 #define VLAN_BITMAP_SIZE(nic_dev)       (ALIGN(VLAN_N_VID, 8) / 8)
65 
66 #define work_to_rx_mode_work(work)      \
67 		container_of(work, struct hinic_rx_mode_work, work)
68 
69 #define rx_mode_work_to_nic_dev(rx_mode_work) \
70 		container_of(rx_mode_work, struct hinic_dev, rx_mode_work)
71 
72 #define HINIC_WAIT_SRIOV_CFG_TIMEOUT	15000
73 
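/* Default MSI-X coalescing parameters applied to every Tx/Rx queue */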
74 #define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT		2
75 #define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG	32
76 #define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG		7
77 
78 static int change_mac_addr(struct net_device *netdev, const u8 *addr);
79 
80 static int set_features(struct hinic_dev *nic_dev,
81 			netdev_features_t pre_features,
82 			netdev_features_t features, bool force_change);
83 
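/* Accumulate a queue's counters into the device totals, then reset the per-queue stats */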
84 static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
85 {
86 	struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats;
87 	struct hinic_rxq_stats rx_stats;
88 
89 	u64_stats_init(&rx_stats.syncp);
90 
91 	hinic_rxq_get_stats(rxq, &rx_stats);
92 
93 	u64_stats_update_begin(&nic_rx_stats->syncp);
94 	nic_rx_stats->bytes += rx_stats.bytes;
95 	nic_rx_stats->pkts  += rx_stats.pkts;
96 	nic_rx_stats->errors += rx_stats.errors;
97 	nic_rx_stats->csum_errors += rx_stats.csum_errors;
98 	nic_rx_stats->other_errors += rx_stats.other_errors;
99 	u64_stats_update_end(&nic_rx_stats->syncp);
100 
101 	hinic_rxq_clean_stats(rxq);
102 }
103 
104 static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
105 {
106 	struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats;
107 	struct hinic_txq_stats tx_stats;
108 
109 	u64_stats_init(&tx_stats.syncp);
110 
111 	hinic_txq_get_stats(txq, &tx_stats);
112 
113 	u64_stats_update_begin(&nic_tx_stats->syncp);
114 	nic_tx_stats->bytes += tx_stats.bytes;
115 	nic_tx_stats->pkts += tx_stats.pkts;
116 	nic_tx_stats->tx_busy += tx_stats.tx_busy;
117 	nic_tx_stats->tx_wake += tx_stats.tx_wake;
118 	nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
119 	nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
120 	u64_stats_update_end(&nic_tx_stats->syncp);
121 
122 	hinic_txq_clean_stats(txq);
123 }
124 
125 static void update_nic_stats(struct hinic_dev *nic_dev)
126 {
127 	int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
128 
129 	for (i = 0; i < num_qps; i++)
130 		update_rx_stats(nic_dev, &nic_dev->rxqs[i]);
131 
132 	for (i = 0; i < num_qps; i++)
133 		update_tx_stats(nic_dev, &nic_dev->txqs[i]);
134 }
135 
136 /**
137  * create_txqs - Create the Logical Tx Queues of specific NIC device
138  * @nic_dev: the specific NIC device
139  *
140  * Return 0 - Success, negative - Failure
141  **/
142 static int create_txqs(struct hinic_dev *nic_dev)
143 {
144 	int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
145 	struct net_device *netdev = nic_dev->netdev;
146 	size_t txq_size;
147 
148 	if (nic_dev->txqs)
149 		return -EINVAL;
150 
151 	txq_size = num_txqs * sizeof(*nic_dev->txqs);
152 	nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL);
153 	if (!nic_dev->txqs)
154 		return -ENOMEM;
155 
156 	for (i = 0; i < num_txqs; i++) {
157 		struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);
158 
159 		err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev);
160 		if (err) {
161 			netif_err(nic_dev, drv, netdev,
162 				  "Failed to init Txq\n");
163 			goto err_init_txq;
164 		}
165 	}
166 
167 	return 0;
168 
169 err_init_txq:
170 	for (j = 0; j < i; j++)
171 		hinic_clean_txq(&nic_dev->txqs[j]);
172 
173 	devm_kfree(&netdev->dev, nic_dev->txqs);
174 	return err;
175 }
176 
177 static void enable_txqs_napi(struct hinic_dev *nic_dev)
178 {
179 	int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
180 	int i;
181 
182 	for (i = 0; i < num_txqs; i++)
183 		napi_enable(&nic_dev->txqs[i].napi);
184 }
185 
186 static void disable_txqs_napi(struct hinic_dev *nic_dev)
187 {
188 	int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
189 	int i;
190 
191 	for (i = 0; i < num_txqs; i++)
192 		napi_disable(&nic_dev->txqs[i].napi);
193 }
194 
195 /**
196  * free_txqs - Free the Logical Tx Queues of specific NIC device
197  * @nic_dev: the specific NIC device
198  **/
199 static void free_txqs(struct hinic_dev *nic_dev)
200 {
201 	int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
202 	struct net_device *netdev = nic_dev->netdev;
203 
204 	if (!nic_dev->txqs)
205 		return;
206 
207 	for (i = 0; i < num_txqs; i++)
208 		hinic_clean_txq(&nic_dev->txqs[i]);
209 
210 	devm_kfree(&netdev->dev, nic_dev->txqs);
211 	nic_dev->txqs = NULL;
212 }
213 
214 /**
215  * create_rxqs - Create the Logical Rx Queues of specific NIC device
216  * @nic_dev: the specific NIC device
217  *
218  * Return 0 - Success, negative - Failure
219  **/
220 static int create_rxqs(struct hinic_dev *nic_dev)
221 {
222 	int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
223 	struct net_device *netdev = nic_dev->netdev;
224 	size_t rxq_size;
225 
226 	if (nic_dev->rxqs)
227 		return -EINVAL;
228 
229 	rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
230 	nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL);
231 	if (!nic_dev->rxqs)
232 		return -ENOMEM;
233 
234 	for (i = 0; i < num_rxqs; i++) {
235 		struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
236 
237 		err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev);
238 		if (err) {
239 			netif_err(nic_dev, drv, netdev,
240 				  "Failed to init rxq\n");
241 			goto err_init_rxq;
242 		}
243 	}
244 
245 	return 0;
246 
247 err_init_rxq:
248 	for (j = 0; j < i; j++)
249 		hinic_clean_rxq(&nic_dev->rxqs[j]);
250 
251 	devm_kfree(&netdev->dev, nic_dev->rxqs);
252 	return err;
253 }
254 
255 /**
256  * free_rxqs - Free the Logical Rx Queues of specific NIC device
257  * @nic_dev: the specific NIC device
258  **/
259 static void free_rxqs(struct hinic_dev *nic_dev)
260 {
261 	int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
262 	struct net_device *netdev = nic_dev->netdev;
263 
264 	if (!nic_dev->rxqs)
265 		return;
266 
267 	for (i = 0; i < num_rxqs; i++)
268 		hinic_clean_rxq(&nic_dev->rxqs[i]);
269 
270 	devm_kfree(&netdev->dev, nic_dev->rxqs);
271 	nic_dev->rxqs = NULL;
272 }
273 
274 static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
275 {
276 	int err;
277 
278 	err = hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
279 	if (err)
280 		return err;
281 
282 	return 0;
283 }
284 
285 static int hinic_rss_init(struct hinic_dev *nic_dev)
286 {
287 	u8 default_rss_key[HINIC_RSS_KEY_SIZE];
288 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
289 	u32 *indir_tbl;
290 	int err, i;
291 
292 	indir_tbl = kcalloc(HINIC_RSS_INDIR_SIZE, sizeof(u32), GFP_KERNEL);
293 	if (!indir_tbl)
294 		return -ENOMEM;
295 
296 	netdev_rss_key_fill(default_rss_key, sizeof(default_rss_key));
297 	for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
298 		indir_tbl[i] = ethtool_rxfh_indir_default(i, nic_dev->num_rss);
299 
300 	err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, default_rss_key);
301 	if (err)
302 		goto out;
303 
304 	err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir_tbl);
305 	if (err)
306 		goto out;
307 
308 	err = hinic_set_rss_type(nic_dev, tmpl_idx, nic_dev->rss_type);
309 	if (err)
310 		goto out;
311 
312 	err = hinic_rss_set_hash_engine(nic_dev, tmpl_idx,
313 					nic_dev->rss_hash_engine);
314 	if (err)
315 		goto out;
316 
317 	err = hinic_rss_cfg(nic_dev, 1, tmpl_idx);
318 	if (err)
319 		goto out;
320 
321 out:
322 	kfree(indir_tbl);
323 	return err;
324 }
325 
326 static void hinic_rss_deinit(struct hinic_dev *nic_dev)
327 {
328 	hinic_rss_cfg(nic_dev, 0, nic_dev->rss_tmpl_idx);
329 }
330 
331 static void hinic_init_rss_parameters(struct hinic_dev *nic_dev)
332 {
333 	nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
334 	nic_dev->rss_type.tcp_ipv6_ext = 1;
335 	nic_dev->rss_type.ipv6_ext = 1;
336 	nic_dev->rss_type.tcp_ipv6 = 1;
337 	nic_dev->rss_type.ipv6 = 1;
338 	nic_dev->rss_type.tcp_ipv4 = 1;
339 	nic_dev->rss_type.ipv4 = 1;
340 	nic_dev->rss_type.udp_ipv6 = 1;
341 	nic_dev->rss_type.udp_ipv4 = 1;
342 }
343 
344 static void hinic_enable_rss(struct hinic_dev *nic_dev)
345 {
346 	struct net_device *netdev = nic_dev->netdev;
347 	struct hinic_hwdev *hwdev = nic_dev->hwdev;
348 	struct hinic_hwif *hwif = hwdev->hwif;
349 	struct pci_dev *pdev = hwif->pdev;
350 	int i, node, err = 0;
351 	u16 num_cpus = 0;
352 
353 	if (nic_dev->max_qps <= 1) {
354 		nic_dev->flags &= ~HINIC_RSS_ENABLE;
355 		nic_dev->rss_limit = nic_dev->max_qps;
356 		nic_dev->num_qps = nic_dev->max_qps;
357 		nic_dev->num_rss = nic_dev->max_qps;
358 
359 		return;
360 	}
361 
362 	err = hinic_rss_template_alloc(nic_dev, &nic_dev->rss_tmpl_idx);
363 	if (err) {
364 		netif_err(nic_dev, drv, netdev,
365 			  "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n");
366 		nic_dev->flags &= ~HINIC_RSS_ENABLE;
367 		nic_dev->max_qps = 1;
368 		nic_dev->rss_limit = nic_dev->max_qps;
369 		nic_dev->num_qps = nic_dev->max_qps;
370 		nic_dev->num_rss = nic_dev->max_qps;
371 
372 		return;
373 	}
374 
375 	nic_dev->flags |= HINIC_RSS_ENABLE;
376 
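	/* Count online CPUs on the device's NUMA node; fall back to all online CPUs */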
377 	for (i = 0; i < num_online_cpus(); i++) {
378 		node = cpu_to_node(i);
379 		if (node == dev_to_node(&pdev->dev))
380 			num_cpus++;
381 	}
382 
383 	if (!num_cpus)
384 		num_cpus = num_online_cpus();
385 
386 	nic_dev->num_qps = hinic_hwdev_num_qps(hwdev);
387 	nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);
388 
389 	nic_dev->rss_limit = nic_dev->num_qps;
390 	nic_dev->num_rss = nic_dev->num_qps;
391 
392 	hinic_init_rss_parameters(nic_dev);
393 	err = hinic_rss_init(nic_dev);
394 	if (err)
395 		netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
396 }
397 
398 int hinic_open(struct net_device *netdev)
399 {
400 	struct hinic_dev *nic_dev = netdev_priv(netdev);
401 	enum hinic_port_link_state link_state;
402 	int err, ret;
403 
404 	if (!(nic_dev->flags & HINIC_INTF_UP)) {
405 		err = hinic_hwdev_ifup(nic_dev->hwdev, nic_dev->sq_depth,
406 				       nic_dev->rq_depth);
407 		if (err) {
408 			netif_err(nic_dev, drv, netdev,
409 				  "Failed - HW interface up\n");
410 			return err;
411 		}
412 	}
413 
414 	err = create_txqs(nic_dev);
415 	if (err) {
416 		netif_err(nic_dev, drv, netdev,
417 			  "Failed to create Tx queues\n");
418 		goto err_create_txqs;
419 	}
420 
421 	enable_txqs_napi(nic_dev);
422 
423 	err = create_rxqs(nic_dev);
424 	if (err) {
425 		netif_err(nic_dev, drv, netdev,
426 			  "Failed to create Rx queues\n");
427 		goto err_create_rxqs;
428 	}
429 
430 	hinic_enable_rss(nic_dev);
431 
432 	err = hinic_configure_max_qnum(nic_dev);
433 	if (err) {
434 		netif_err(nic_dev, drv, nic_dev->netdev,
435 			  "Failed to configure the maximum number of queues\n");
436 		goto err_port_state;
437 	}
438 
439 	netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
440 	netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);
441 
442 	err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE);
443 	if (err) {
444 		netif_err(nic_dev, drv, netdev,
445 			  "Failed to set port state\n");
446 		goto err_port_state;
447 	}
448 
449 	err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE);
450 	if (err) {
451 		netif_err(nic_dev, drv, netdev,
452 			  "Failed to set func port state\n");
453 		goto err_func_port_state;
454 	}
455 
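	/* Serialize the flag and carrier updates below with the link event handler */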
456 	down(&nic_dev->mgmt_lock);
457 
458 	err = hinic_port_link_state(nic_dev, &link_state);
459 	if (err) {
460 		netif_err(nic_dev, drv, netdev, "Failed to get link state\n");
461 		goto err_port_link;
462 	}
463 
464 	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
465 		hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state);
466 
467 	if (link_state == HINIC_LINK_STATE_UP) {
468 		nic_dev->flags |= HINIC_LINK_UP;
469 		nic_dev->cable_unplugged = false;
470 		nic_dev->module_unrecognized = false;
471 	}
472 
473 	nic_dev->flags |= HINIC_INTF_UP;
474 
475 	if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
476 	    (HINIC_LINK_UP | HINIC_INTF_UP)) {
477 		netif_info(nic_dev, drv, netdev, "link + intf UP\n");
478 		netif_carrier_on(netdev);
479 		netif_tx_wake_all_queues(netdev);
480 	}
481 
482 	up(&nic_dev->mgmt_lock);
483 
484 	netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n");
485 	return 0;
486 
487 err_port_link:
488 	up(&nic_dev->mgmt_lock);
489 	ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
490 	if (ret)
491 		netif_warn(nic_dev, drv, netdev,
492 			   "Failed to revert func port state\n");
493 
494 err_func_port_state:
495 	ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
496 	if (ret)
497 		netif_warn(nic_dev, drv, netdev,
498 			   "Failed to revert port state\n");
499 err_port_state:
500 	free_rxqs(nic_dev);
501 	if (nic_dev->flags & HINIC_RSS_ENABLE) {
502 		hinic_rss_deinit(nic_dev);
503 		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
504 	}
505 
506 err_create_rxqs:
507 	disable_txqs_napi(nic_dev);
508 	free_txqs(nic_dev);
509 
510 err_create_txqs:
511 	if (!(nic_dev->flags & HINIC_INTF_UP))
512 		hinic_hwdev_ifdown(nic_dev->hwdev);
513 	return err;
514 }
515 
516 int hinic_close(struct net_device *netdev)
517 {
518 	struct hinic_dev *nic_dev = netdev_priv(netdev);
519 	unsigned int flags;
520 
521 	/* Disable Tx queue NAPI first to avoid re-waking the Tx queues in free_tx_poll */
522 	disable_txqs_napi(nic_dev);
523 
524 	down(&nic_dev->mgmt_lock);
525 
526 	flags = nic_dev->flags;
527 	nic_dev->flags &= ~HINIC_INTF_UP;
528 
529 	netif_carrier_off(netdev);
530 	netif_tx_disable(netdev);
531 
532 	update_nic_stats(nic_dev);
533 
534 	up(&nic_dev->mgmt_lock);
535 
536 	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
537 		hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
538 
539 	hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
540 
541 	hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
542 
543 	if (nic_dev->flags & HINIC_RSS_ENABLE) {
544 		hinic_rss_deinit(nic_dev);
545 		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
546 	}
547 
548 	free_rxqs(nic_dev);
549 	free_txqs(nic_dev);
550 
551 	if (flags & HINIC_INTF_UP)
552 		hinic_hwdev_ifdown(nic_dev->hwdev);
553 
554 	netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n");
555 	return 0;
556 }
557 
558 static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
559 {
560 	struct hinic_dev *nic_dev = netdev_priv(netdev);
561 	int err;
562 
563 	netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu);
564 
565 	err = hinic_port_set_mtu(nic_dev, new_mtu);
566 	if (err)
567 		netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
568 	else
569 		netdev->mtu = new_mtu;
570 
571 	return err;
572 }
573 
574 /**
575  * change_mac_addr - change the main mac address of network device
576  * @netdev: network device
577  * @addr: mac address to set
578  *
579  * Return 0 - Success, negative - Failure
580  **/
581 static int change_mac_addr(struct net_device *netdev, const u8 *addr)
582 {
583 	struct hinic_dev *nic_dev = netdev_priv(netdev);
584 	u16 vid = 0;
585 	int err;
586 
587 	if (!is_valid_ether_addr(addr))
588 		return -EADDRNOTAVAIL;
589 
590 	netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n",
591 		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
592 
593 	down(&nic_dev->mgmt_lock);
594 
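	/* Swap the MAC on VLAN 0 and on every VLAN currently set in vlan_bitmap */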
595 	do {
596 		err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid);
597 		if (err) {
598 			netif_err(nic_dev, drv, netdev,
599 				  "Failed to delete mac\n");
600 			break;
601 		}
602 
603 		err = hinic_port_add_mac(nic_dev, addr, vid);
604 		if (err) {
605 			netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
606 			break;
607 		}
608 
609 		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
610 	} while (vid != VLAN_N_VID);
611 
612 	up(&nic_dev->mgmt_lock);
613 	return err;
614 }
615 
616 static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
617 {
618 	unsigned char new_mac[ETH_ALEN];
619 	struct sockaddr *saddr = addr;
620 	int err;
621 
622 	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
623 
624 	err = change_mac_addr(netdev, new_mac);
625 	if (!err)
626 		memcpy(netdev->dev_addr, new_mac, ETH_ALEN);
627 
628 	return err;
629 }
630 
631 /**
632  * add_mac_addr - add mac address to network device
633  * @netdev: network device
634  * @addr: mac address to add
635  *
636  * Return 0 - Success, negative - Failure
637  **/
638 static int add_mac_addr(struct net_device *netdev, const u8 *addr)
639 {
640 	struct hinic_dev *nic_dev = netdev_priv(netdev);
641 	u16 vid = 0;
642 	int err;
643 
644 	netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n",
645 		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
646 
647 	down(&nic_dev->mgmt_lock);
648 
649 	do {
650 		err = hinic_port_add_mac(nic_dev, addr, vid);
651 		if (err) {
652 			netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
653 			break;
654 		}
655 
656 		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
657 	} while (vid != VLAN_N_VID);
658 
659 	up(&nic_dev->mgmt_lock);
660 	return err;
661 }
662 
663 /**
664  * remove_mac_addr - remove mac address from network device
665  * @netdev: network device
666  * @addr: mac address to remove
667  *
668  * Return 0 - Success, negative - Failure
669  **/
670 static int remove_mac_addr(struct net_device *netdev, const u8 *addr)
671 {
672 	struct hinic_dev *nic_dev = netdev_priv(netdev);
673 	u16 vid = 0;
674 	int err;
675 
676 	if (!is_valid_ether_addr(addr))
677 		return -EADDRNOTAVAIL;
678 
679 	netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n",
680 		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
681 
682 	down(&nic_dev->mgmt_lock);
683 
684 	do {
685 		err = hinic_port_del_mac(nic_dev, addr, vid);
686 		if (err) {
687 			netif_err(nic_dev, drv, netdev,
688 				  "Failed to delete mac\n");
689 			break;
690 		}
691 
692 		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
693 	} while (vid != VLAN_N_VID);
694 
695 	up(&nic_dev->mgmt_lock);
696 	return err;
697 }
698 
699 static int hinic_vlan_rx_add_vid(struct net_device *netdev,
700 				 __always_unused __be16 proto, u16 vid)
701 {
702 	struct hinic_dev *nic_dev = netdev_priv(netdev);
703 	int ret, err;
704 
705 	netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid);
706 
707 	down(&nic_dev->mgmt_lock);
708 
709 	err = hinic_port_add_vlan(nic_dev, vid);
710 	if (err) {
711 		netif_err(nic_dev, drv, netdev, "Failed to add vlan\n");
712 		goto err_vlan_add;
713 	}
714 
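	/* Program the current MAC on the new VLAN as well */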
715 	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
716 	if (err && err != HINIC_PF_SET_VF_ALREADY) {
717 		netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
718 		goto err_add_mac;
719 	}
720 
721 	bitmap_set(nic_dev->vlan_bitmap, vid, 1);
722 
723 	up(&nic_dev->mgmt_lock);
724 	return 0;
725 
726 err_add_mac:
727 	ret = hinic_port_del_vlan(nic_dev, vid);
728 	if (ret)
729 		netif_err(nic_dev, drv, netdev,
730 			  "Failed to revert by removing vlan\n");
731 
732 err_vlan_add:
733 	up(&nic_dev->mgmt_lock);
734 	return err;
735 }
736 
737 static int hinic_vlan_rx_kill_vid(struct net_device *netdev,
738 				  __always_unused __be16 proto, u16 vid)
739 {
740 	struct hinic_dev *nic_dev = netdev_priv(netdev);
741 	int err;
742 
743 	netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid);
744 
745 	down(&nic_dev->mgmt_lock);
746 
747 	err = hinic_port_del_vlan(nic_dev, vid);
748 	if (err) {
749 		netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
750 		goto err_del_vlan;
751 	}
752 
753 	bitmap_clear(nic_dev->vlan_bitmap, vid, 1);
754 
755 	up(&nic_dev->mgmt_lock);
756 	return 0;
757 
758 err_del_vlan:
759 	up(&nic_dev->mgmt_lock);
760 	return err;
761 }
762 
763 static void set_rx_mode(struct work_struct *work)
764 {
765 	struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
766 	struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
767 
768 	hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);
769 
770 	__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
771 	__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
772 }
773 
774 static void hinic_set_rx_mode(struct net_device *netdev)
775 {
776 	struct hinic_dev *nic_dev = netdev_priv(netdev);
777 	struct hinic_rx_mode_work *rx_mode_work;
778 	u32 rx_mode;
779 
780 	rx_mode_work = &nic_dev->rx_mode_work;
781 
782 	rx_mode = HINIC_RX_MODE_UC |
783 		  HINIC_RX_MODE_MC |
784 		  HINIC_RX_MODE_BC;
785 
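	/* Promiscuous mode is requested from hardware only for the PF */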
786 	if (netdev->flags & IFF_PROMISC) {
787 		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
788 			rx_mode |= HINIC_RX_MODE_PROMISC;
789 	} else if (netdev->flags & IFF_ALLMULTI) {
790 		rx_mode |= HINIC_RX_MODE_MC_ALL;
791 	}
792 
793 	rx_mode_work->rx_mode = rx_mode;
794 
795 	queue_work(nic_dev->workq, &rx_mode_work->work);
796 }
797 
798 static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
799 {
800 	struct hinic_dev *nic_dev = netdev_priv(netdev);
801 	u16 sw_pi, hw_ci, sw_ci;
802 	struct hinic_sq *sq;
803 	u16 num_sqs, q_id;
804 
805 	num_sqs = hinic_hwdev_num_qps(nic_dev->hwdev);
806 
807 	netif_err(nic_dev, drv, netdev, "Tx timeout\n");
808 
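	/* Dump SW producer/consumer and HW consumer indices of each stopped Tx queue */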
809 	for (q_id = 0; q_id < num_sqs; q_id++) {
810 		if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
811 			continue;
812 
813 		sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);
814 		sw_pi = atomic_read(&sq->wq->prod_idx) & sq->wq->mask;
815 		hw_ci = be16_to_cpu(*(u16 *)(sq->hw_ci_addr)) & sq->wq->mask;
816 		sw_ci = atomic_read(&sq->wq->cons_idx) & sq->wq->mask;
817 		netif_err(nic_dev, drv, netdev, "Txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx\n",
818 			  q_id, sw_pi, hw_ci, sw_ci,
819 			  nic_dev->txqs[q_id].napi.state);
820 	}
821 }
822 
823 static void hinic_get_stats64(struct net_device *netdev,
824 			      struct rtnl_link_stats64 *stats)
825 {
826 	struct hinic_dev *nic_dev = netdev_priv(netdev);
827 	struct hinic_rxq_stats *nic_rx_stats;
828 	struct hinic_txq_stats *nic_tx_stats;
829 
830 	nic_rx_stats = &nic_dev->rx_stats;
831 	nic_tx_stats = &nic_dev->tx_stats;
832 
833 	down(&nic_dev->mgmt_lock);
834 
835 	if (nic_dev->flags & HINIC_INTF_UP)
836 		update_nic_stats(nic_dev);
837 
838 	up(&nic_dev->mgmt_lock);
839 
840 	stats->rx_bytes   = nic_rx_stats->bytes;
841 	stats->rx_packets = nic_rx_stats->pkts;
842 	stats->rx_errors  = nic_rx_stats->errors;
843 
844 	stats->tx_bytes   = nic_tx_stats->bytes;
845 	stats->tx_packets = nic_tx_stats->pkts;
846 	stats->tx_errors  = nic_tx_stats->tx_dropped;
847 }
848 
849 static int hinic_set_features(struct net_device *netdev,
850 			      netdev_features_t features)
851 {
852 	struct hinic_dev *nic_dev = netdev_priv(netdev);
853 
854 	return set_features(nic_dev, nic_dev->netdev->features,
855 			    features, false);
856 }
857 
858 static netdev_features_t hinic_fix_features(struct net_device *netdev,
859 					    netdev_features_t features)
860 {
861 	struct hinic_dev *nic_dev = netdev_priv(netdev);
862 
863 	/* If Rx checksum is disabled, then LRO should also be disabled */
864 	if (!(features & NETIF_F_RXCSUM)) {
865 		netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n");
866 		features &= ~NETIF_F_LRO;
867 	}
868 
869 	return features;
870 }
871 
872 static const struct net_device_ops hinic_netdev_ops = {
873 	.ndo_open = hinic_open,
874 	.ndo_stop = hinic_close,
875 	.ndo_change_mtu = hinic_change_mtu,
876 	.ndo_set_mac_address = hinic_set_mac_addr,
877 	.ndo_validate_addr = eth_validate_addr,
878 	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
879 	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
880 	.ndo_set_rx_mode = hinic_set_rx_mode,
881 	.ndo_start_xmit = hinic_xmit_frame,
882 	.ndo_tx_timeout = hinic_tx_timeout,
883 	.ndo_get_stats64 = hinic_get_stats64,
884 	.ndo_fix_features = hinic_fix_features,
885 	.ndo_set_features = hinic_set_features,
886 	.ndo_set_vf_mac	= hinic_ndo_set_vf_mac,
887 	.ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
888 	.ndo_get_vf_config = hinic_ndo_get_vf_config,
889 	.ndo_set_vf_trust = hinic_ndo_set_vf_trust,
890 	.ndo_set_vf_rate = hinic_ndo_set_vf_bw,
891 	.ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
892 	.ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
893 };
894 
895 static const struct net_device_ops hinicvf_netdev_ops = {
896 	.ndo_open = hinic_open,
897 	.ndo_stop = hinic_close,
898 	.ndo_change_mtu = hinic_change_mtu,
899 	.ndo_set_mac_address = hinic_set_mac_addr,
900 	.ndo_validate_addr = eth_validate_addr,
901 	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
902 	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
903 	.ndo_set_rx_mode = hinic_set_rx_mode,
904 	.ndo_start_xmit = hinic_xmit_frame,
905 	.ndo_tx_timeout = hinic_tx_timeout,
906 	.ndo_get_stats64 = hinic_get_stats64,
907 	.ndo_fix_features = hinic_fix_features,
908 	.ndo_set_features = hinic_set_features,
909 };
910 
911 static void netdev_features_init(struct net_device *netdev)
912 {
913 	netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
914 			      NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
915 			      NETIF_F_RXCSUM | NETIF_F_LRO |
916 			      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
917 
918 	netdev->vlan_features = netdev->hw_features;
919 
920 	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
921 }
922 
923 static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
924 {
925 	struct hinic_nic_cfg *nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
926 	struct hinic_pause_config pause_info = {0};
927 	struct hinic_port_cap port_cap = {0};
928 
929 	if (hinic_port_get_cap(nic_dev, &port_cap))
930 		return;
931 
932 	mutex_lock(&nic_cfg->cfg_mutex);
933 	if (nic_cfg->pause_set || !port_cap.autoneg_state) {
934 		nic_cfg->auto_neg = port_cap.autoneg_state;
935 		pause_info.auto_neg = nic_cfg->auto_neg;
936 		pause_info.rx_pause = nic_cfg->rx_pause;
937 		pause_info.tx_pause = nic_cfg->tx_pause;
938 		hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
939 	}
940 	mutex_unlock(&nic_cfg->cfg_mutex);
941 }
942 
943 /**
944  * link_status_event_handler - link event handler
945  * @handle: nic device for the handler
946  * @buf_in: input buffer
947  * @in_size: input size
948  * @buf_out: output buffer
949  * @out_size: returned output size
952  **/
953 static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
954 				      void *buf_out, u16 *out_size)
955 {
956 	struct hinic_port_link_status *link_status, *ret_link_status;
957 	struct hinic_dev *nic_dev = handle;
958 
959 	link_status = buf_in;
960 
961 	if (link_status->link == HINIC_LINK_STATE_UP) {
962 		down(&nic_dev->mgmt_lock);
963 
964 		nic_dev->flags |= HINIC_LINK_UP;
965 		nic_dev->cable_unplugged = false;
966 		nic_dev->module_unrecognized = false;
967 
968 		if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
969 		    (HINIC_LINK_UP | HINIC_INTF_UP)) {
970 			netif_carrier_on(nic_dev->netdev);
971 			netif_tx_wake_all_queues(nic_dev->netdev);
972 		}
973 
974 		up(&nic_dev->mgmt_lock);
975 
976 		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
977 			hinic_refresh_nic_cfg(nic_dev);
978 
979 		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n");
980 	} else {
981 		down(&nic_dev->mgmt_lock);
982 
983 		nic_dev->flags &= ~HINIC_LINK_UP;
984 
985 		netif_carrier_off(nic_dev->netdev);
986 		netif_tx_disable(nic_dev->netdev);
987 
988 		up(&nic_dev->mgmt_lock);
989 
990 		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
991 	}
992 
993 	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
994 		hinic_notify_all_vfs_link_changed(nic_dev->hwdev,
995 						  link_status->link);
996 
997 	ret_link_status = buf_out;
998 	ret_link_status->status = 0;
999 
1000 	*out_size = sizeof(*ret_link_status);
1001 }
1002 
1003 static void cable_plug_event(void *handle,
1004 			     void *buf_in, u16 in_size,
1005 			     void *buf_out, u16 *out_size)
1006 {
1007 	struct hinic_cable_plug_event *plug_event = buf_in;
1008 	struct hinic_dev *nic_dev = handle;
1009 
1010 	nic_dev->cable_unplugged = plug_event->plugged ? false : true;
1011 
1012 	*out_size = sizeof(*plug_event);
1013 	plug_event = buf_out;
1014 	plug_event->status = 0;
1015 }
1016 
1017 static void link_err_event(void *handle,
1018 			   void *buf_in, u16 in_size,
1019 			   void *buf_out, u16 *out_size)
1020 {
1021 	struct hinic_link_err_event *link_err = buf_in;
1022 	struct hinic_dev *nic_dev = handle;
1023 
1024 	if (link_err->err_type >= LINK_ERR_NUM)
1025 		netif_info(nic_dev, link, nic_dev->netdev,
1026 			   "Link failed, Unknown error type: 0x%x\n",
1027 			   link_err->err_type);
1028 	else
1029 		nic_dev->module_unrecognized = true;
1030 
1031 	*out_size = sizeof(*link_err);
1032 	link_err = buf_out;
1033 	link_err->status = 0;
1034 }
1035 
1036 static int set_features(struct hinic_dev *nic_dev,
1037 			netdev_features_t pre_features,
1038 			netdev_features_t features, bool force_change)
1039 {
1040 	netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
1041 	u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
1042 	netdev_features_t failed_features = 0;
1043 	int ret = 0;
1044 	int err = 0;
1045 
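	/*
	 * Apply only the changed offloads to hardware; remember which ones
	 * failed so they can be masked out of the netdev features on error.
	 */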
1046 	if (changed & NETIF_F_TSO) {
1047 		ret = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
1048 					 HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
1049 		if (ret) {
1050 			err = ret;
1051 			failed_features |= NETIF_F_TSO;
1052 		}
1053 	}
1054 
1055 	if (changed & NETIF_F_RXCSUM) {
1056 		ret = hinic_set_rx_csum_offload(nic_dev, csum_en);
1057 		if (ret) {
1058 			err = ret;
1059 			failed_features |= NETIF_F_RXCSUM;
1060 		}
1061 	}
1062 
1063 	if (changed & NETIF_F_LRO) {
1064 		ret = hinic_set_rx_lro_state(nic_dev,
1065 					     !!(features & NETIF_F_LRO),
1066 					     HINIC_LRO_RX_TIMER_DEFAULT,
1067 					     HINIC_LRO_MAX_WQE_NUM_DEFAULT);
1068 		if (ret) {
1069 			err = ret;
1070 			failed_features |= NETIF_F_LRO;
1071 		}
1072 	}
1073 
1074 	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
1075 		ret = hinic_set_rx_vlan_offload(nic_dev,
1076 						!!(features &
1077 						   NETIF_F_HW_VLAN_CTAG_RX));
1078 		if (ret) {
1079 			err = ret;
1080 			failed_features |= NETIF_F_HW_VLAN_CTAG_RX;
1081 		}
1082 	}
1083 
1084 	if (err) {
1085 		nic_dev->netdev->features = features ^ failed_features;
1086 		return -EIO;
1087 	}
1088 
1089 	return 0;
1090 }
1091 
1092 static int hinic_init_intr_coalesce(struct hinic_dev *nic_dev)
1093 {
1094 	u64 size;
1095 	u16 i;
1096 
1097 	size = sizeof(struct hinic_intr_coal_info) * nic_dev->max_qps;
1098 	nic_dev->rx_intr_coalesce = kzalloc(size, GFP_KERNEL);
1099 	if (!nic_dev->rx_intr_coalesce)
1100 		return -ENOMEM;
1101 	nic_dev->tx_intr_coalesce = kzalloc(size, GFP_KERNEL);
1102 	if (!nic_dev->tx_intr_coalesce) {
1103 		kfree(nic_dev->rx_intr_coalesce);
1104 		return -ENOMEM;
1105 	}
1106 
1107 	for (i = 0; i < nic_dev->max_qps; i++) {
1108 		nic_dev->rx_intr_coalesce[i].pending_limt =
1109 			HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
1110 		nic_dev->rx_intr_coalesce[i].coalesce_timer_cfg =
1111 			HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
1112 		nic_dev->rx_intr_coalesce[i].resend_timer_cfg =
1113 			HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
1114 		nic_dev->tx_intr_coalesce[i].pending_limt =
1115 			HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
1116 		nic_dev->tx_intr_coalesce[i].coalesce_timer_cfg =
1117 			HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
1118 		nic_dev->tx_intr_coalesce[i].resend_timer_cfg =
1119 			HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
1120 	}
1121 
1122 	return 0;
1123 }
1124 
1125 static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev)
1126 {
1127 	kfree(nic_dev->tx_intr_coalesce);
1128 	kfree(nic_dev->rx_intr_coalesce);
1129 }
1130 
1131 /**
1132  * nic_dev_init - Initialize the NIC device
1133  * @pdev: the NIC pci device
1134  *
1135  * Return 0 - Success, negative - Failure
1136  **/
1137 static int nic_dev_init(struct pci_dev *pdev)
1138 {
1139 	struct hinic_rx_mode_work *rx_mode_work;
1140 	struct hinic_txq_stats *tx_stats;
1141 	struct hinic_rxq_stats *rx_stats;
1142 	struct hinic_dev *nic_dev;
1143 	struct net_device *netdev;
1144 	struct hinic_hwdev *hwdev;
1145 	struct devlink *devlink;
1146 	int err, num_qps;
1147 
1148 	devlink = hinic_devlink_alloc();
1149 	if (!devlink) {
1150 		dev_err(&pdev->dev, "Hinic devlink alloc failed\n");
1151 		return -ENOMEM;
1152 	}
1153 
1154 	hwdev = hinic_init_hwdev(pdev, devlink);
1155 	if (IS_ERR(hwdev)) {
1156 		dev_err(&pdev->dev, "Failed to initialize HW device\n");
1157 		hinic_devlink_free(devlink);
1158 		return PTR_ERR(hwdev);
1159 	}
1160 
1161 	num_qps = hinic_hwdev_num_qps(hwdev);
1162 	if (num_qps <= 0) {
1163 		dev_err(&pdev->dev, "Invalid number of QPS\n");
1164 		err = -EINVAL;
1165 		goto err_num_qps;
1166 	}
1167 
1168 	netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps);
1169 	if (!netdev) {
1170 		dev_err(&pdev->dev, "Failed to allocate Ethernet device\n");
1171 		err = -ENOMEM;
1172 		goto err_alloc_etherdev;
1173 	}
1174 
1175 	if (!HINIC_IS_VF(hwdev->hwif))
1176 		netdev->netdev_ops = &hinic_netdev_ops;
1177 	else
1178 		netdev->netdev_ops = &hinicvf_netdev_ops;
1179 
1180 	netdev->max_mtu = ETH_MAX_MTU;
1181 
1182 	nic_dev = netdev_priv(netdev);
1183 	nic_dev->netdev = netdev;
1184 	nic_dev->hwdev  = hwdev;
1185 	nic_dev->msg_enable = MSG_ENABLE_DEFAULT;
1186 	nic_dev->flags = 0;
1187 	nic_dev->txqs = NULL;
1188 	nic_dev->rxqs = NULL;
1189 	nic_dev->tx_weight = tx_weight;
1190 	nic_dev->rx_weight = rx_weight;
1191 	nic_dev->sq_depth = HINIC_SQ_DEPTH;
1192 	nic_dev->rq_depth = HINIC_RQ_DEPTH;
1193 	nic_dev->sriov_info.hwdev = hwdev;
1194 	nic_dev->sriov_info.pdev = pdev;
1195 	nic_dev->max_qps = num_qps;
1196 	nic_dev->devlink = devlink;
1197 
1198 	hinic_set_ethtool_ops(netdev);
1199 
1200 	sema_init(&nic_dev->mgmt_lock, 1);
1201 
1202 	tx_stats = &nic_dev->tx_stats;
1203 	rx_stats = &nic_dev->rx_stats;
1204 
1205 	u64_stats_init(&tx_stats->syncp);
1206 	u64_stats_init(&rx_stats->syncp);
1207 
1208 	nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
1209 					    VLAN_BITMAP_SIZE(nic_dev),
1210 					    GFP_KERNEL);
1211 	if (!nic_dev->vlan_bitmap) {
1212 		err = -ENOMEM;
1213 		goto err_vlan_bitmap;
1214 	}
1215 
1216 	nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME);
1217 	if (!nic_dev->workq) {
1218 		err = -ENOMEM;
1219 		goto err_workq;
1220 	}
1221 
1222 	pci_set_drvdata(pdev, netdev);
1223 
1224 	err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
1225 	if (err) {
1226 		dev_err(&pdev->dev, "Failed to get mac address\n");
1227 		goto err_get_mac;
1228 	}
1229 
1230 	if (!is_valid_ether_addr(netdev->dev_addr)) {
1231 		if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
1232 			dev_err(&pdev->dev, "Invalid MAC address\n");
1233 			err = -EIO;
1234 			goto err_add_mac;
1235 		}
1236 
1237 		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
1238 			 netdev->dev_addr);
1239 		eth_hw_addr_random(netdev);
1240 	}
1241 
1242 	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
1243 	if (err && err != HINIC_PF_SET_VF_ALREADY) {
1244 		dev_err(&pdev->dev, "Failed to add mac\n");
1245 		goto err_add_mac;
1246 	}
1247 
1248 	err = hinic_port_set_mtu(nic_dev, netdev->mtu);
1249 	if (err) {
1250 		dev_err(&pdev->dev, "Failed to set mtu\n");
1251 		goto err_set_mtu;
1252 	}
1253 
1254 	rx_mode_work = &nic_dev->rx_mode_work;
1255 	INIT_WORK(&rx_mode_work->work, set_rx_mode);
1256 
1257 	netdev_features_init(netdev);
1258 
1259 	netif_carrier_off(netdev);
1260 
1261 	hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
1262 				nic_dev, link_status_event_handler);
1263 	hinic_hwdev_cb_register(nic_dev->hwdev,
1264 				HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT,
1265 				nic_dev, cable_plug_event);
1266 	hinic_hwdev_cb_register(nic_dev->hwdev,
1267 				HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT,
1268 				nic_dev, link_err_event);
1269 
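	/* Force-sync the default feature set to hardware on first initialization */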
1270 	err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
1271 	if (err)
1272 		goto err_set_features;
1273 
1274 	/* enable pause and disable pfc by default */
1275 	err = hinic_dcb_set_pfc(nic_dev->hwdev, 0, 0);
1276 	if (err)
1277 		goto err_set_pfc;
1278 
1279 	SET_NETDEV_DEV(netdev, &pdev->dev);
1280 
1281 	err = hinic_init_intr_coalesce(nic_dev);
1282 	if (err) {
1283 		dev_err(&pdev->dev, "Failed to init interrupt coalescing\n");
1284 		goto err_init_intr;
1285 	}
1286 
1287 	err = register_netdev(netdev);
1288 	if (err) {
1289 		dev_err(&pdev->dev, "Failed to register netdev\n");
1290 		goto err_reg_netdev;
1291 	}
1292 
1293 	return 0;
1294 
1295 err_reg_netdev:
1296 	hinic_free_intr_coalesce(nic_dev);
1297 err_init_intr:
1298 err_set_pfc:
1299 err_set_features:
1300 	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1301 				  HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT);
1302 	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1303 				  HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT);
1304 	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1305 				  HINIC_MGMT_MSG_CMD_LINK_STATUS);
1306 	cancel_work_sync(&rx_mode_work->work);
1307 
1308 err_set_mtu:
1309 	hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
1310 err_add_mac:
1311 err_get_mac:
1312 	pci_set_drvdata(pdev, NULL);
1313 	destroy_workqueue(nic_dev->workq);
1314 err_workq:
1315 err_vlan_bitmap:
1316 	free_netdev(netdev);
1317 
1318 err_alloc_etherdev:
1319 err_num_qps:
1320 	hinic_free_hwdev(hwdev);
1321 	hinic_devlink_free(devlink);
1322 	return err;
1323 }
1324 
1325 static int hinic_probe(struct pci_dev *pdev,
1326 		       const struct pci_device_id *id)
1327 {
1328 	int err = pci_enable_device(pdev);
1329 
1330 	if (err) {
1331 		dev_err(&pdev->dev, "Failed to enable PCI device\n");
1332 		return err;
1333 	}
1334 
1335 	err = pci_request_regions(pdev, HINIC_DRV_NAME);
1336 	if (err) {
1337 		dev_err(&pdev->dev, "Failed to request PCI regions\n");
1338 		goto err_pci_regions;
1339 	}
1340 
1341 	pci_set_master(pdev);
1342 
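	/* Prefer 64-bit DMA; fall back to a 32-bit mask if the platform lacks support */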
1343 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1344 	if (err) {
1345 		dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
1346 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1347 		if (err) {
1348 			dev_err(&pdev->dev, "Failed to set DMA mask\n");
1349 			goto err_dma_mask;
1350 		}
1351 	}
1352 
1353 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1354 	if (err) {
1355 		dev_warn(&pdev->dev,
1356 			 "Couldn't set 64-bit consistent DMA mask\n");
1357 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1358 		if (err) {
1359 			dev_err(&pdev->dev,
1360 				"Failed to set consistent DMA mask\n");
1361 			goto err_dma_consistent_mask;
1362 		}
1363 	}
1364 
1365 	err = nic_dev_init(pdev);
1366 	if (err) {
1367 		dev_err(&pdev->dev, "Failed to initialize NIC device\n");
1368 		goto err_nic_dev_init;
1369 	}
1370 
1371 	dev_info(&pdev->dev, "HiNIC driver - probed\n");
1372 	return 0;
1373 
1374 err_nic_dev_init:
1375 err_dma_consistent_mask:
1376 err_dma_mask:
1377 	pci_release_regions(pdev);
1378 
1379 err_pci_regions:
1380 	pci_disable_device(pdev);
1381 	return err;
1382 }
1383 
1386 static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
1387 {
1388 	struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
1389 	u32 loop_cnt = 0;
1390 
1391 	set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);
1392 	usleep_range(9900, 10000);
1393 
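	/*
	 * Poll roughly every 10ms, up to HINIC_WAIT_SRIOV_CFG_TIMEOUT
	 * iterations, for any in-flight SR-IOV enable/disable to finish.
	 */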
1394 	while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) {
1395 		if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&
1396 		    !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))
1397 			return;
1398 
1399 		usleep_range(9900, 10000);
1400 		loop_cnt++;
1401 	}
1402 }
1403 
1404 static void hinic_remove(struct pci_dev *pdev)
1405 {
1406 	struct net_device *netdev = pci_get_drvdata(pdev);
1407 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1408 	struct devlink *devlink = nic_dev->devlink;
1409 	struct hinic_rx_mode_work *rx_mode_work;
1410 
1411 	if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
1412 		wait_sriov_cfg_complete(nic_dev);
1413 		hinic_pci_sriov_disable(pdev);
1414 	}
1415 
1416 	unregister_netdev(netdev);
1417 
1418 	hinic_free_intr_coalesce(nic_dev);
1419 
1420 	hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
1421 
1422 	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1423 				  HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT);
1424 	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1425 				  HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT);
1426 	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1427 				  HINIC_MGMT_MSG_CMD_LINK_STATUS);
1428 
1429 	rx_mode_work = &nic_dev->rx_mode_work;
1430 	cancel_work_sync(&rx_mode_work->work);
1431 
1432 	pci_set_drvdata(pdev, NULL);
1433 
1434 	destroy_workqueue(nic_dev->workq);
1435 
1436 	hinic_free_hwdev(nic_dev->hwdev);
1437 
1438 	free_netdev(netdev);
1439 
1440 	hinic_devlink_free(devlink);
1441 
1442 	pci_release_regions(pdev);
1443 	pci_disable_device(pdev);
1444 
1445 	dev_info(&pdev->dev, "HiNIC driver - removed\n");
1446 }
1447 
1448 static void hinic_shutdown(struct pci_dev *pdev)
1449 {
1450 	pci_disable_device(pdev);
1451 }
1452 
1453 static const struct pci_device_id hinic_pci_table[] = {
1454 	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
1455 	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
1456 	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
1457 	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
1458 	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_VF), 0},
1459 	{ 0, 0}
1460 };
1461 MODULE_DEVICE_TABLE(pci, hinic_pci_table);
1462 
1463 static struct pci_driver hinic_driver = {
1464 	.name           = HINIC_DRV_NAME,
1465 	.id_table       = hinic_pci_table,
1466 	.probe          = hinic_probe,
1467 	.remove         = hinic_remove,
1468 	.shutdown       = hinic_shutdown,
1469 	.sriov_configure = hinic_pci_sriov_configure,
1470 };
1471 
1472 module_pci_driver(hinic_driver);
1473