1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/dma-mapping.h>
5 #include <linux/etherdevice.h>
6 #include <linux/interrupt.h>
7 #include <linux/if_vlan.h>
8 #include <linux/ip.h>
9 #include <linux/ipv6.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/aer.h>
13 #include <linux/skbuff.h>
14 #include <linux/sctp.h>
15 #include <linux/vermagic.h>
16 #include <net/gre.h>
17 #include <net/pkt_cls.h>
18 #include <net/tcp.h>
19 #include <net/vxlan.h>
20 
21 #include "hnae3.h"
22 #include "hns3_enet.h"
23 
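/* Helper macros used when building TX buffer descriptors: hns3_set_field()
 * ORs @val, shifted left by @shift, into @origin (e.g.
 * hns3_set_field(bd_info, HNS3_TXD_VLD_B, 1) sets the valid bit), and
 * hns3_tx_bd_count() returns how many TX buffer descriptors a buffer of S
 * bytes needs, i.e. DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE).
 */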
24 #define hns3_set_field(origin, shift, val)	((origin) |= ((val) << (shift)))
25 #define hns3_tx_bd_count(S)	DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
26 
27 static void hns3_clear_all_ring(struct hnae3_handle *h);
28 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
29 static void hns3_remove_hw_addr(struct net_device *netdev);
30 
31 static const char hns3_driver_name[] = "hns3";
32 const char hns3_driver_version[] = VERMAGIC_STRING;
33 static const char hns3_driver_string[] =
34 			"Hisilicon Ethernet Network Driver for Hip08 Family";
35 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
36 static struct hnae3_client client;
37 
38 static int debug = -1;
39 module_param(debug, int, 0);
40 MODULE_PARM_DESC(debug, " Network interface message level setting");
41 
42 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
43 			   NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
44 
45 /* hns3_pci_tbl - PCI Device ID Table
46  *
47  * Last entry must be all 0s
48  *
49  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
50  *   Class, Class Mask, private data (not used) }
51  */
52 static const struct pci_device_id hns3_pci_tbl[] = {
53 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
54 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
55 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
56 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
57 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
58 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
59 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
60 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
61 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
62 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
63 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
64 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
65 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
66 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
67 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
68 	/* required last entry */
69 	{0, }
70 };
71 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
72 
73 static irqreturn_t hns3_irq_handle(int irq, void *vector)
74 {
75 	struct hns3_enet_tqp_vector *tqp_vector = vector;
76 
77 	napi_schedule(&tqp_vector->napi);
78 
79 	return IRQ_HANDLED;
80 }
81 
82 /* This callback records irq affinity changes into the vector's affinity
83  * mask when irq_set_affinity_notifier() is in use.
84  */
85 static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
86 					 const cpumask_t *mask)
87 {
88 	struct hns3_enet_tqp_vector *tqp_vectors =
89 		container_of(notify, struct hns3_enet_tqp_vector,
90 			     affinity_notify);
91 
92 	tqp_vectors->affinity_mask = *mask;
93 }
94 
95 static void hns3_nic_irq_affinity_release(struct kref *ref)
96 {
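	/* intentionally empty: irq_set_affinity_notifier() requires a release
	 * callback for the notifier kref, but there is nothing to free here
	 */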
97 }
98 
99 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
100 {
101 	struct hns3_enet_tqp_vector *tqp_vectors;
102 	unsigned int i;
103 
104 	for (i = 0; i < priv->vector_num; i++) {
105 		tqp_vectors = &priv->tqp_vector[i];
106 
107 		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
108 			continue;
109 
110 		/* clear the affinity notifier and affinity mask */
111 		irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
112 		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
113 
114 		/* release the irq resource */
115 		free_irq(tqp_vectors->vector_irq, tqp_vectors);
116 		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
117 	}
118 }
119 
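/* hns3_nic_init_irq - request an irq for every initialized TQP vector, name
 * it "<netdev>-TxRx/Rx/Tx-<index>" according to the rings it serves, and
 * register an affinity notifier/hint so the vector's affinity_mask follows
 * later affinity changes.
 */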
120 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
121 {
122 	struct hns3_enet_tqp_vector *tqp_vectors;
123 	int txrx_int_idx = 0;
124 	int rx_int_idx = 0;
125 	int tx_int_idx = 0;
126 	unsigned int i;
127 	int ret;
128 
129 	for (i = 0; i < priv->vector_num; i++) {
130 		tqp_vectors = &priv->tqp_vector[i];
131 
132 		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
133 			continue;
134 
135 		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
136 			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
137 				 "%s-%s-%d", priv->netdev->name, "TxRx",
138 				 txrx_int_idx++);
139 			txrx_int_idx++;
140 		} else if (tqp_vectors->rx_group.ring) {
141 			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
142 				 "%s-%s-%d", priv->netdev->name, "Rx",
143 				 rx_int_idx++);
144 		} else if (tqp_vectors->tx_group.ring) {
145 			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
146 				 "%s-%s-%d", priv->netdev->name, "Tx",
147 				 tx_int_idx++);
148 		} else {
149 			/* Skip this unused q_vector */
150 			continue;
151 		}
152 
153 		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
154 
155 		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
156 				  tqp_vectors->name,
157 				  tqp_vectors);
158 		if (ret) {
159 			netdev_err(priv->netdev, "request irq(%d) fail\n",
160 				   tqp_vectors->vector_irq);
161 			return ret;
162 		}
163 
164 		tqp_vectors->affinity_notify.notify =
165 					hns3_nic_irq_affinity_notify;
166 		tqp_vectors->affinity_notify.release =
167 					hns3_nic_irq_affinity_release;
168 		irq_set_affinity_notifier(tqp_vectors->vector_irq,
169 					  &tqp_vectors->affinity_notify);
170 		irq_set_affinity_hint(tqp_vectors->vector_irq,
171 				      &tqp_vectors->affinity_mask);
172 
173 		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
174 	}
175 
176 	return 0;
177 }
178 
179 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
180 				 u32 mask_en)
181 {
182 	writel(mask_en, tqp_vector->mask_addr);
183 }
184 
185 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
186 {
187 	napi_enable(&tqp_vector->napi);
188 
189 	/* enable vector */
190 	hns3_mask_vector_irq(tqp_vector, 1);
191 }
192 
193 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
194 {
195 	/* disable vector */
196 	hns3_mask_vector_irq(tqp_vector, 0);
197 
198 	disable_irq(tqp_vector->vector_irq);
199 	napi_disable(&tqp_vector->napi);
200 }
201 
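/* hns3_set_vector_coalesce_rl - program the per-vector interrupt rate limit.
 * @rl_value is given in usec and converted to register units by
 * hns3_rl_usec_to_reg(); the RL enable bit is only set when GL self-adaption
 * is disabled on both the tx and rx groups.
 */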
202 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
203 				 u32 rl_value)
204 {
205 	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
206 
207 	/* this defines the configuration for RL (Interrupt Rate Limiter).
208 	 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
209 	 * GL and RL are two ways to achieve interrupt coalescing.
210 	 */
211 
212 	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
213 	    !tqp_vector->rx_group.coal.gl_adapt_enable)
214 		/* According to the hardware, the range of rl_reg is
215 		 * 0-59 and the unit is 4.
216 		 */
217 		rl_reg |=  HNS3_INT_RL_ENABLE_MASK;
218 
219 	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
220 }
221 
222 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
223 				    u32 gl_value)
224 {
225 	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
226 
227 	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
228 }
229 
230 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
231 				    u32 gl_value)
232 {
233 	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
234 
235 	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
236 }
237 
238 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
239 				   struct hns3_nic_priv *priv)
240 {
241 	/* initialize the configuration for interrupt coalescing.
242 	 * 1. GL (Interrupt Gap Limiter)
243 	 * 2. RL (Interrupt Rate Limiter)
244 	 */
245 
246 	/* Default: enable interrupt coalescing self-adaptive and GL */
247 	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
248 	tqp_vector->rx_group.coal.gl_adapt_enable = 1;
249 
250 	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
251 	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
252 
253 	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
254 	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
255 }
256 
257 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
258 				      struct hns3_nic_priv *priv)
259 {
260 	struct hnae3_handle *h = priv->ae_handle;
261 
262 	hns3_set_vector_coalesce_tx_gl(tqp_vector,
263 				       tqp_vector->tx_group.coal.int_gl);
264 	hns3_set_vector_coalesce_rx_gl(tqp_vector,
265 				       tqp_vector->rx_group.coal.int_gl);
266 	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
267 }
268 
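/* hns3_nic_set_real_num_queue - sync the stack's queue layout with the
 * hardware: program the per-TC queue ranges (or reset the TC config when at
 * most one TC is used) and set the real number of tx and rx queues to
 * rss_size * num_tc.
 */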
269 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
270 {
271 	struct hnae3_handle *h = hns3_get_handle(netdev);
272 	struct hnae3_knic_private_info *kinfo = &h->kinfo;
273 	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
274 	int i, ret;
275 
276 	if (kinfo->num_tc <= 1) {
277 		netdev_reset_tc(netdev);
278 	} else {
279 		ret = netdev_set_num_tc(netdev, kinfo->num_tc);
280 		if (ret) {
281 			netdev_err(netdev,
282 				   "netdev_set_num_tc fail, ret=%d!\n", ret);
283 			return ret;
284 		}
285 
286 		for (i = 0; i < HNAE3_MAX_TC; i++) {
287 			if (!kinfo->tc_info[i].enable)
288 				continue;
289 
290 			netdev_set_tc_queue(netdev,
291 					    kinfo->tc_info[i].tc,
292 					    kinfo->tc_info[i].tqp_count,
293 					    kinfo->tc_info[i].tqp_offset);
294 		}
295 	}
296 
297 	ret = netif_set_real_num_tx_queues(netdev, queue_size);
298 	if (ret) {
299 		netdev_err(netdev,
300 			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
301 			   ret);
302 		return ret;
303 	}
304 
305 	ret = netif_set_real_num_rx_queues(netdev, queue_size);
306 	if (ret) {
307 		netdev_err(netdev,
308 			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
309 		return ret;
310 	}
311 
312 	return 0;
313 }
314 
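/* hns3_get_max_available_channels - return the largest usable per-TC channel
 * count: the allocated TQPs divided by num_tc, capped at the maximum RSS
 * size reported by the ae_dev.
 */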
315 static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
316 {
317 	u16 alloc_tqps, max_rss_size, rss_size;
318 
319 	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
320 	rss_size = alloc_tqps / h->kinfo.num_tc;
321 
322 	return min_t(u16, rss_size, max_rss_size);
323 }
324 
325 static void hns3_tqp_enable(struct hnae3_queue *tqp)
326 {
327 	u32 rcb_reg;
328 
329 	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
330 	rcb_reg |= BIT(HNS3_RING_EN_B);
331 	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
332 }
333 
334 static void hns3_tqp_disable(struct hnae3_queue *tqp)
335 {
336 	u32 rcb_reg;
337 
338 	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
339 	rcb_reg &= ~BIT(HNS3_RING_EN_B);
340 	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
341 }
342 
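/* hns3_nic_net_up - bring the data path up: reset all rings, request the
 * vector irqs, enable napi/vectors and the RCB rings, then start the ae_dev.
 * On failure the already-completed steps are rolled back in reverse order.
 */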
343 static int hns3_nic_net_up(struct net_device *netdev)
344 {
345 	struct hns3_nic_priv *priv = netdev_priv(netdev);
346 	struct hnae3_handle *h = priv->ae_handle;
347 	int i, j;
348 	int ret;
349 
350 	ret = hns3_nic_reset_all_ring(h);
351 	if (ret)
352 		return ret;
353 
354 	/* get irq resource for all vectors */
355 	ret = hns3_nic_init_irq(priv);
356 	if (ret) {
357 		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
358 		return ret;
359 	}
360 
361 	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
362 
363 	/* enable the vectors */
364 	for (i = 0; i < priv->vector_num; i++)
365 		hns3_vector_enable(&priv->tqp_vector[i]);
366 
367 	/* enable rcb */
368 	for (j = 0; j < h->kinfo.num_tqps; j++)
369 		hns3_tqp_enable(h->kinfo.tqp[j]);
370 
371 	/* start the ae_dev */
372 	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
373 	if (ret)
374 		goto out_start_err;
375 
376 	return 0;
377 
378 out_start_err:
379 	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
380 	while (j--)
381 		hns3_tqp_disable(h->kinfo.tqp[j]);
382 
383 	for (j = i - 1; j >= 0; j--)
384 		hns3_vector_disable(&priv->tqp_vector[j]);
385 
386 	hns3_nic_uninit_irq(priv);
387 
388 	return ret;
389 }
390 
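/* hns3_config_xps - set the XPS cpu mapping of every tx ring to its vector's
 * affinity mask, which normally keeps transmission and completion processing
 * on the same cpus.
 */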
391 static void hns3_config_xps(struct hns3_nic_priv *priv)
392 {
393 	int i;
394 
395 	for (i = 0; i < priv->vector_num; i++) {
396 		struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
397 		struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
398 
399 		while (ring) {
400 			int ret;
401 
402 			ret = netif_set_xps_queue(priv->netdev,
403 						  &tqp_vector->affinity_mask,
404 						  ring->tqp->tqp_index);
405 			if (ret)
406 				netdev_warn(priv->netdev,
407 					    "set xps queue failed: %d", ret);
408 
409 			ring = ring->next;
410 		}
411 	}
412 }
413 
414 static int hns3_nic_net_open(struct net_device *netdev)
415 {
416 	struct hns3_nic_priv *priv = netdev_priv(netdev);
417 	struct hnae3_handle *h = hns3_get_handle(netdev);
418 	struct hnae3_knic_private_info *kinfo;
419 	int i, ret;
420 
421 	if (hns3_nic_resetting(netdev))
422 		return -EBUSY;
423 
424 	netif_carrier_off(netdev);
425 
426 	ret = hns3_nic_set_real_num_queue(netdev);
427 	if (ret)
428 		return ret;
429 
430 	ret = hns3_nic_net_up(netdev);
431 	if (ret) {
432 		netdev_err(netdev,
433 			   "hns net up fail, ret=%d!\n", ret);
434 		return ret;
435 	}
436 
437 	kinfo = &h->kinfo;
438 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
439 		netdev_set_prio_tc_map(netdev, i,
440 				       kinfo->prio_tc[i]);
441 	}
442 
443 	if (h->ae_algo->ops->set_timer_task)
444 		h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
445 
446 	hns3_config_xps(priv);
447 	return 0;
448 }
449 
450 static void hns3_nic_net_down(struct net_device *netdev)
451 {
452 	struct hns3_nic_priv *priv = netdev_priv(netdev);
453 	struct hnae3_handle *h = hns3_get_handle(netdev);
454 	const struct hnae3_ae_ops *ops;
455 	int i;
456 
457 	/* disable vectors */
458 	for (i = 0; i < priv->vector_num; i++)
459 		hns3_vector_disable(&priv->tqp_vector[i]);
460 
461 	/* disable rcb */
462 	for (i = 0; i < h->kinfo.num_tqps; i++)
463 		hns3_tqp_disable(h->kinfo.tqp[i]);
464 
465 	/* stop ae_dev */
466 	ops = priv->ae_handle->ae_algo->ops;
467 	if (ops->stop)
468 		ops->stop(priv->ae_handle);
469 
470 	/* free irq resources */
471 	hns3_nic_uninit_irq(priv);
472 
473 	hns3_clear_all_ring(priv->ae_handle);
474 }
475 
476 static int hns3_nic_net_stop(struct net_device *netdev)
477 {
478 	struct hns3_nic_priv *priv = netdev_priv(netdev);
479 	struct hnae3_handle *h = hns3_get_handle(netdev);
480 
481 	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
482 		return 0;
483 
484 	if (h->ae_algo->ops->set_timer_task)
485 		h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
486 
487 	netif_tx_stop_all_queues(netdev);
488 	netif_carrier_off(netdev);
489 
490 	hns3_nic_net_down(netdev);
491 
492 	return 0;
493 }
494 
495 static int hns3_nic_uc_sync(struct net_device *netdev,
496 			    const unsigned char *addr)
497 {
498 	struct hnae3_handle *h = hns3_get_handle(netdev);
499 
500 	if (h->ae_algo->ops->add_uc_addr)
501 		return h->ae_algo->ops->add_uc_addr(h, addr);
502 
503 	return 0;
504 }
505 
506 static int hns3_nic_uc_unsync(struct net_device *netdev,
507 			      const unsigned char *addr)
508 {
509 	struct hnae3_handle *h = hns3_get_handle(netdev);
510 
511 	if (h->ae_algo->ops->rm_uc_addr)
512 		return h->ae_algo->ops->rm_uc_addr(h, addr);
513 
514 	return 0;
515 }
516 
517 static int hns3_nic_mc_sync(struct net_device *netdev,
518 			    const unsigned char *addr)
519 {
520 	struct hnae3_handle *h = hns3_get_handle(netdev);
521 
522 	if (h->ae_algo->ops->add_mc_addr)
523 		return h->ae_algo->ops->add_mc_addr(h, addr);
524 
525 	return 0;
526 }
527 
528 static int hns3_nic_mc_unsync(struct net_device *netdev,
529 			      const unsigned char *addr)
530 {
531 	struct hnae3_handle *h = hns3_get_handle(netdev);
532 
533 	if (h->ae_algo->ops->rm_mc_addr)
534 		return h->ae_algo->ops->rm_mc_addr(h, addr);
535 
536 	return 0;
537 }
538 
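/* hns3_get_netdev_flags - translate IFF_PROMISC and IFF_ALLMULTI into the
 * HNAE3 promiscuous flags; when not in promiscuous mode, vlan filtering is
 * requested via HNAE3_VLAN_FLTR.
 */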
539 static u8 hns3_get_netdev_flags(struct net_device *netdev)
540 {
541 	u8 flags = 0;
542 
543 	if (netdev->flags & IFF_PROMISC) {
544 		flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
545 	} else {
546 		flags |= HNAE3_VLAN_FLTR;
547 		if (netdev->flags & IFF_ALLMULTI)
548 			flags |= HNAE3_USER_MPE;
549 	}
550 
551 	return flags;
552 }
553 
554 static void hns3_nic_set_rx_mode(struct net_device *netdev)
555 {
556 	struct hnae3_handle *h = hns3_get_handle(netdev);
557 	u8 new_flags;
558 	int ret;
559 
560 	new_flags = hns3_get_netdev_flags(netdev);
561 
562 	ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
563 	if (ret) {
564 		netdev_err(netdev, "sync uc address fail\n");
565 		if (ret == -ENOSPC)
566 			new_flags |= HNAE3_OVERFLOW_UPE;
567 	}
568 
569 	if (netdev->flags & IFF_MULTICAST) {
570 		ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
571 				    hns3_nic_mc_unsync);
572 		if (ret) {
573 			netdev_err(netdev, "sync mc address fail\n");
574 			if (ret == -ENOSPC)
575 				new_flags |= HNAE3_OVERFLOW_MPE;
576 		}
577 	}
578 
579 	/* When user-requested promiscuous mode is enabled, vlan filtering is
580 	 * disabled to let all packets in. When promiscuous mode is enabled due
581 	 * to MAC-VLAN table overflow, vlan filtering stays enabled.
582 	 */
583 	hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
584 	h->netdev_flags = new_flags;
585 	hns3_update_promisc_mode(netdev, new_flags);
586 }
587 
588 int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
589 {
590 	struct hns3_nic_priv *priv = netdev_priv(netdev);
591 	struct hnae3_handle *h = priv->ae_handle;
592 
593 	if (h->ae_algo->ops->set_promisc_mode) {
594 		return h->ae_algo->ops->set_promisc_mode(h,
595 						promisc_flags & HNAE3_UPE,
596 						promisc_flags & HNAE3_MPE);
597 	}
598 
599 	return 0;
600 }
601 
602 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
603 {
604 	struct hns3_nic_priv *priv = netdev_priv(netdev);
605 	struct hnae3_handle *h = priv->ae_handle;
606 	bool last_state;
607 
608 	if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
609 		last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
610 		if (enable != last_state) {
611 			netdev_info(netdev,
612 				    "%s vlan filter\n",
613 				    enable ? "enable" : "disable");
614 			h->ae_algo->ops->enable_vlan_filter(h, enable);
615 		}
616 	}
617 }
618 
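/* hns3_set_tso - prepare a GSO skb for hardware TSO: clear the checksums the
 * hardware will regenerate (IPv4 header, tunnel udp), remove the L4 payload
 * length from the pseudo checksum, and fill the TSO bit, paylen and mss used
 * by the tx BD. Returns 0 immediately for non-GSO skbs.
 */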
619 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
620 			u16 *mss, u32 *type_cs_vlan_tso)
621 {
622 	u32 l4_offset, hdr_len;
623 	union l3_hdr_info l3;
624 	union l4_hdr_info l4;
625 	u32 l4_paylen;
626 	int ret;
627 
628 	if (!skb_is_gso(skb))
629 		return 0;
630 
631 	ret = skb_cow_head(skb, 0);
632 	if (unlikely(ret))
633 		return ret;
634 
635 	l3.hdr = skb_network_header(skb);
636 	l4.hdr = skb_transport_header(skb);
637 
638 	/* Software should clear the IPv4's checksum field when tso is
639 	 * needed.
640 	 */
641 	if (l3.v4->version == 4)
642 		l3.v4->check = 0;
643 
644 	/* tunnel packet.*/
645 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
646 					 SKB_GSO_GRE_CSUM |
647 					 SKB_GSO_UDP_TUNNEL |
648 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
649 		if ((!(skb_shinfo(skb)->gso_type &
650 		    SKB_GSO_PARTIAL)) &&
651 		    (skb_shinfo(skb)->gso_type &
652 		    SKB_GSO_UDP_TUNNEL_CSUM)) {
653 			/* Software should clear the udp's checksum
654 			 * field when tso is needed.
655 			 */
656 			l4.udp->check = 0;
657 		}
658 		/* reset l3&l4 pointers from outer to inner headers */
659 		l3.hdr = skb_inner_network_header(skb);
660 		l4.hdr = skb_inner_transport_header(skb);
661 
662 		/* Software should clear the IPv4's checksum field when
663 		 * tso is needed.
664 		 */
665 		if (l3.v4->version == 4)
666 			l3.v4->check = 0;
667 	}
668 
669 	/* normal or tunnel packet*/
670 	l4_offset = l4.hdr - skb->data;
671 	hdr_len = (l4.tcp->doff << 2) + l4_offset;
672 
673 	/* remove payload length from inner pseudo checksum when tso*/
674 	l4_paylen = skb->len - l4_offset;
675 	csum_replace_by_diff(&l4.tcp->check,
676 			     (__force __wsum)htonl(l4_paylen));
677 
678 	/* find the txbd field values */
679 	*paylen = skb->len - hdr_len;
680 	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
681 
682 	/* get MSS for TSO */
683 	*mss = skb_shinfo(skb)->gso_size;
684 
685 	return 0;
686 }
687 
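/* hns3_get_l4_protocol - extract the outer L4 protocol and, for encapsulated
 * skbs, the inner L4 protocol, skipping IPv6 extension headers when present.
 * Returns -EINVAL for non-IP outer protocols.
 */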
688 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
689 				u8 *il4_proto)
690 {
691 	union l3_hdr_info l3;
692 	unsigned char *l4_hdr;
693 	unsigned char *exthdr;
694 	u8 l4_proto_tmp;
695 	__be16 frag_off;
696 
697 	/* find outer header point */
698 	l3.hdr = skb_network_header(skb);
699 	l4_hdr = skb_transport_header(skb);
700 
701 	if (skb->protocol == htons(ETH_P_IPV6)) {
702 		exthdr = l3.hdr + sizeof(*l3.v6);
703 		l4_proto_tmp = l3.v6->nexthdr;
704 		if (l4_hdr != exthdr)
705 			ipv6_skip_exthdr(skb, exthdr - skb->data,
706 					 &l4_proto_tmp, &frag_off);
707 	} else if (skb->protocol == htons(ETH_P_IP)) {
708 		l4_proto_tmp = l3.v4->protocol;
709 	} else {
710 		return -EINVAL;
711 	}
712 
713 	*ol4_proto = l4_proto_tmp;
714 
715 	/* tunnel packet */
716 	if (!skb->encapsulation) {
717 		*il4_proto = 0;
718 		return 0;
719 	}
720 
721 	/* find inner header point */
722 	l3.hdr = skb_inner_network_header(skb);
723 	l4_hdr = skb_inner_transport_header(skb);
724 
725 	if (l3.v6->version == 6) {
726 		exthdr = l3.hdr + sizeof(*l3.v6);
727 		l4_proto_tmp = l3.v6->nexthdr;
728 		if (l4_hdr != exthdr)
729 			ipv6_skip_exthdr(skb, exthdr - skb->data,
730 					 &l4_proto_tmp, &frag_off);
731 	} else if (l3.v4->version == 4) {
732 		l4_proto_tmp = l3.v4->protocol;
733 	}
734 
735 	*il4_proto = l4_proto_tmp;
736 
737 	return 0;
738 }
739 
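/* hns3_set_l2l3l4_len - fill the L2/L3/L4 header length fields of the tx BD,
 * plus the outer OL2/OL3/OL4 lengths for tunnel packets. L2 lengths are
 * written in units of 2 bytes and L3/L4 lengths in units of 4 bytes;
 * unsupported packet types leave the length fields unfilled.
 */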
740 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
741 				u8 il4_proto, u32 *type_cs_vlan_tso,
742 				u32 *ol_type_vlan_len_msec)
743 {
744 	union l3_hdr_info l3;
745 	union l4_hdr_info l4;
746 	unsigned char *l2_hdr;
747 	u8 l4_proto = ol4_proto;
748 	u32 ol2_len;
749 	u32 ol3_len;
750 	u32 ol4_len;
751 	u32 l2_len;
752 	u32 l3_len;
753 
754 	l3.hdr = skb_network_header(skb);
755 	l4.hdr = skb_transport_header(skb);
756 
757 	/* compute L2 header size for normal packet, in units of 2 bytes */
758 	l2_len = l3.hdr - skb->data;
759 	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
760 
761 	/* tunnel packet*/
762 	if (skb->encapsulation) {
763 		/* compute OL2 header size, in units of 2 bytes */
764 		ol2_len = l2_len;
765 		hns3_set_field(*ol_type_vlan_len_msec,
766 			       HNS3_TXD_L2LEN_S, ol2_len >> 1);
767 
768 		/* compute OL3 header size, in units of 4 bytes */
769 		ol3_len = l4.hdr - l3.hdr;
770 		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
771 			       ol3_len >> 2);
772 
773 		/* MAC in UDP, MAC in GRE (0x6558)*/
774 		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
775 			/* switch MAC header ptr from outer to inner header.*/
776 			l2_hdr = skb_inner_mac_header(skb);
777 
778 			/* compute OL4 header size, in units of 4 bytes. */
779 			ol4_len = l2_hdr - l4.hdr;
780 			hns3_set_field(*ol_type_vlan_len_msec,
781 				       HNS3_TXD_L4LEN_S, ol4_len >> 2);
782 
783 			/* switch IP header ptr from outer to inner header */
784 			l3.hdr = skb_inner_network_header(skb);
785 
786 			/* compute inner l2 header size, in units of 2 bytes. */
787 			l2_len = l3.hdr - l2_hdr;
788 			hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S,
789 				       l2_len >> 1);
790 		} else {
791 			/* for skb packet types not supported by hardware,
792 			 * the txbd len field is not filled.
793 			 */
794 			return;
795 		}
796 
797 		/* switch L4 header pointer from outer to inner */
798 		l4.hdr = skb_inner_transport_header(skb);
799 
800 		l4_proto = il4_proto;
801 	}
802 
803 	/* compute inner(/normal) L3 header size, in units of 4 bytes */
804 	l3_len = l4.hdr - l3.hdr;
805 	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
806 
807 	/* compute inner(/normal) L4 header size, in units of 4 bytes */
808 	switch (l4_proto) {
809 	case IPPROTO_TCP:
810 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
811 			       l4.tcp->doff);
812 		break;
813 	case IPPROTO_SCTP:
814 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
815 			       (sizeof(struct sctphdr) >> 2));
816 		break;
817 	case IPPROTO_UDP:
818 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
819 			       (sizeof(struct udphdr) >> 2));
820 		break;
821 	default:
822 		/* for skb packet types not supported by hardware,
823 		 * the txbd len field is not filled.
824 		 */
825 		return;
826 	}
827 }
828 
829 /* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and the
830  * packet is udp with the IANA assigned VXLAN dest port, the hardware is
831  * expected to do the checksum offload, but it will not do the checksum
832  * offload when the udp dest port is 4789. Fall back to software checksum
833  * for this case.
834  */
835 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
836 {
837 	union l4_hdr_info l4;
838 
839 	l4.hdr = skb_transport_header(skb);
840 
841 	if (!(!skb->encapsulation &&
842 	      l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
843 		return false;
844 
845 	skb_checksum_help(skb);
846 
847 	return true;
848 }
849 
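/* hns3_set_l3l4_type_csum - fill the L3/L4 type and checksum-enable bits of
 * the tx BD (including the tunnel OL3 type and OL4 tunnel type) for
 * CHECKSUM_PARTIAL skbs. Packet types the hardware cannot offload fall back
 * to skb_checksum_help(), except GSO skbs which are rejected with -EDOM.
 */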
850 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
851 				   u8 il4_proto, u32 *type_cs_vlan_tso,
852 				   u32 *ol_type_vlan_len_msec)
853 {
854 	union l3_hdr_info l3;
855 	u32 l4_proto = ol4_proto;
856 
857 	l3.hdr = skb_network_header(skb);
858 
859 	/* define OL3 type and tunnel type(OL4).*/
860 	if (skb->encapsulation) {
861 		/* define outer network header type.*/
862 		if (skb->protocol == htons(ETH_P_IP)) {
863 			if (skb_is_gso(skb))
864 				hns3_set_field(*ol_type_vlan_len_msec,
865 					       HNS3_TXD_OL3T_S,
866 					       HNS3_OL3T_IPV4_CSUM);
867 			else
868 				hns3_set_field(*ol_type_vlan_len_msec,
869 					       HNS3_TXD_OL3T_S,
870 					       HNS3_OL3T_IPV4_NO_CSUM);
871 
872 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
873 			hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
874 				       HNS3_OL3T_IPV6);
875 		}
876 
877 		/* define tunnel type(OL4).*/
878 		switch (l4_proto) {
879 		case IPPROTO_UDP:
880 			hns3_set_field(*ol_type_vlan_len_msec,
881 				       HNS3_TXD_TUNTYPE_S,
882 				       HNS3_TUN_MAC_IN_UDP);
883 			break;
884 		case IPPROTO_GRE:
885 			hns3_set_field(*ol_type_vlan_len_msec,
886 				       HNS3_TXD_TUNTYPE_S,
887 				       HNS3_TUN_NVGRE);
888 			break;
889 		default:
890 			/* drop the tunnel packet if the hardware doesn't support
891 			 * it, because hardware can't calculate csum when doing TSO.
892 			 */
893 			if (skb_is_gso(skb))
894 				return -EDOM;
895 
896 			/* the stack computes the IP header already, so let the
897 			 * driver calculate the l4 checksum when not doing TSO.
898 			 */
899 			skb_checksum_help(skb);
900 			return 0;
901 		}
902 
903 		l3.hdr = skb_inner_network_header(skb);
904 		l4_proto = il4_proto;
905 	}
906 
907 	if (l3.v4->version == 4) {
908 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
909 			       HNS3_L3T_IPV4);
910 
911 		/* the stack computes the IP header already, the only time we
912 		 * need the hardware to recompute it is in the case of TSO.
913 		 */
914 		if (skb_is_gso(skb))
915 			hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
916 	} else if (l3.v6->version == 6) {
917 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
918 			       HNS3_L3T_IPV6);
919 	}
920 
921 	switch (l4_proto) {
922 	case IPPROTO_TCP:
923 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
924 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
925 			       HNS3_L4T_TCP);
926 		break;
927 	case IPPROTO_UDP:
928 		if (hns3_tunnel_csum_bug(skb))
929 			break;
930 
931 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
932 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
933 			       HNS3_L4T_UDP);
934 		break;
935 	case IPPROTO_SCTP:
936 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
937 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
938 			       HNS3_L4T_SCTP);
939 		break;
940 	default:
941 		/* drop the tunnel packet if the hardware doesn't support
942 		 * it, because hardware can't calculate csum when doing TSO.
943 		 */
944 		if (skb_is_gso(skb))
945 			return -EDOM;
946 
947 		/* the stack computes the IP header already, so let the
948 		 * driver calculate the l4 checksum when not doing TSO.
949 		 */
950 		skb_checksum_help(skb);
951 		return 0;
952 	}
953 
954 	return 0;
955 }
956 
957 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
958 {
959 	/* Config bd buffer end */
960 	hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
961 	hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
962 }
963 
964 static int hns3_fill_desc_vtags(struct sk_buff *skb,
965 				struct hns3_enet_ring *tx_ring,
966 				u32 *inner_vlan_flag,
967 				u32 *out_vlan_flag,
968 				u16 *inner_vtag,
969 				u16 *out_vtag)
970 {
971 #define HNS3_TX_VLAN_PRIO_SHIFT 13
972 
973 	struct hnae3_handle *handle = tx_ring->tqp->handle;
974 
975 	/* Due to a HW limitation, when port based insert VLAN is enabled, only
976 	 * one VLAN header is allowed in the skb, otherwise it causes a RAS error.
977 	 */
978 	if (unlikely(skb_vlan_tagged_multi(skb) &&
979 		     handle->port_base_vlan_state ==
980 		     HNAE3_PORT_BASE_VLAN_ENABLE))
981 		return -EINVAL;
982 
983 	if (skb->protocol == htons(ETH_P_8021Q) &&
984 	    !(tx_ring->tqp->handle->kinfo.netdev->features &
985 	    NETIF_F_HW_VLAN_CTAG_TX)) {
986 		/* When HW VLAN acceleration is turned off, and the stack
987 		 * sets the protocol to 802.1q, the driver just needs to
988 		 * set the protocol to the encapsulated ethertype.
989 		 */
990 		skb->protocol = vlan_get_protocol(skb);
991 		return 0;
992 	}
993 
994 	if (skb_vlan_tag_present(skb)) {
995 		u16 vlan_tag;
996 
997 		vlan_tag = skb_vlan_tag_get(skb);
998 		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
999 
1000 		/* Based on hw strategy, use out_vtag in two layer tag case,
1001 		 * and use inner_vtag in one tag case.
1002 		 */
1003 		if (skb->protocol == htons(ETH_P_8021Q)) {
1004 			if (handle->port_base_vlan_state ==
1005 			    HNAE3_PORT_BASE_VLAN_DISABLE){
1006 				hns3_set_field(*out_vlan_flag,
1007 					       HNS3_TXD_OVLAN_B, 1);
1008 				*out_vtag = vlan_tag;
1009 			} else {
1010 				hns3_set_field(*inner_vlan_flag,
1011 					       HNS3_TXD_VLAN_B, 1);
1012 				*inner_vtag = vlan_tag;
1013 			}
1014 		} else {
1015 			hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
1016 			*inner_vtag = vlan_tag;
1017 		}
1018 	} else if (skb->protocol == htons(ETH_P_8021Q)) {
1019 		struct vlan_ethhdr *vhdr;
1020 		int rc;
1021 
1022 		rc = skb_cow_head(skb, 0);
1023 		if (unlikely(rc < 0))
1024 			return rc;
1025 		vhdr = (struct vlan_ethhdr *)skb->data;
1026 		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
1027 					<< HNS3_TX_VLAN_PRIO_SHIFT);
1028 	}
1029 
1030 	skb->protocol = vlan_get_protocol(skb);
1031 	return 0;
1032 }
1033 
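/* hns3_fill_desc - dma map one buffer (skb head or page frag) and fill the
 * tx BD(s) describing it; a buffer larger than HNS3_MAX_BD_SIZE is split
 * across several BDs. For the skb head this also fills the vlan, checksum
 * and TSO related BD fields.
 */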
1034 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
1035 			  int size, int frag_end, enum hns_desc_type type)
1036 {
1037 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
1038 	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1039 	struct device *dev = ring_to_dev(ring);
1040 	struct skb_frag_struct *frag;
1041 	unsigned int frag_buf_num;
1042 	int k, sizeoflast;
1043 	dma_addr_t dma;
1044 
1045 	if (type == DESC_TYPE_SKB) {
1046 		struct sk_buff *skb = (struct sk_buff *)priv;
1047 		u32 ol_type_vlan_len_msec = 0;
1048 		u32 type_cs_vlan_tso = 0;
1049 		u32 paylen = skb->len;
1050 		u16 inner_vtag = 0;
1051 		u16 out_vtag = 0;
1052 		u16 mss = 0;
1053 		int ret;
1054 
1055 		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
1056 					   &ol_type_vlan_len_msec,
1057 					   &inner_vtag, &out_vtag);
1058 		if (unlikely(ret))
1059 			return ret;
1060 
1061 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1062 			u8 ol4_proto, il4_proto;
1063 
1064 			skb_reset_mac_len(skb);
1065 
1066 			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
1067 			if (unlikely(ret))
1068 				return ret;
1069 			hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
1070 					    &type_cs_vlan_tso,
1071 					    &ol_type_vlan_len_msec);
1072 			ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
1073 						      &type_cs_vlan_tso,
1074 						      &ol_type_vlan_len_msec);
1075 			if (unlikely(ret))
1076 				return ret;
1077 
1078 			ret = hns3_set_tso(skb, &paylen, &mss,
1079 					   &type_cs_vlan_tso);
1080 			if (unlikely(ret))
1081 				return ret;
1082 		}
1083 
1084 		/* Set txbd */
1085 		desc->tx.ol_type_vlan_len_msec =
1086 			cpu_to_le32(ol_type_vlan_len_msec);
1087 		desc->tx.type_cs_vlan_tso_len =
1088 			cpu_to_le32(type_cs_vlan_tso);
1089 		desc->tx.paylen = cpu_to_le32(paylen);
1090 		desc->tx.mss = cpu_to_le16(mss);
1091 		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
1092 		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
1093 
1094 		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1095 	} else {
1096 		frag = (struct skb_frag_struct *)priv;
1097 		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1098 	}
1099 
1100 	if (unlikely(dma_mapping_error(ring->dev, dma))) {
1101 		ring->stats.sw_err_cnt++;
1102 		return -ENOMEM;
1103 	}
1104 
1105 	desc_cb->length = size;
1106 
1107 	if (likely(size <= HNS3_MAX_BD_SIZE)) {
1108 		u16 bdtp_fe_sc_vld_ra_ri = 0;
1109 
1110 		desc_cb->priv = priv;
1111 		desc_cb->dma = dma;
1112 		desc_cb->type = type;
1113 		desc->addr = cpu_to_le64(dma);
1114 		desc->tx.send_size = cpu_to_le16(size);
1115 		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
1116 		desc->tx.bdtp_fe_sc_vld_ra_ri =
1117 			cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1118 
1119 		ring_ptr_move_fw(ring, next_to_use);
1120 		return 0;
1121 	}
1122 
1123 	frag_buf_num = hns3_tx_bd_count(size);
1124 	sizeoflast = size & HNS3_TX_LAST_SIZE_M;
1125 	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1126 
1127 	/* When frag size is bigger than hardware limit, split this frag */
1128 	for (k = 0; k < frag_buf_num; k++) {
1129 		u16 bdtp_fe_sc_vld_ra_ri = 0;
1130 
1131 		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
1132 		desc_cb->priv = priv;
1133 		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
1134 		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
1135 					DESC_TYPE_SKB : DESC_TYPE_PAGE;
1136 
1137 		/* now, fill the descriptor */
1138 		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1139 		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1140 				(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1141 		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
1142 				       frag_end && (k == frag_buf_num - 1) ?
1143 						1 : 0);
1144 		desc->tx.bdtp_fe_sc_vld_ra_ri =
1145 				cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1146 
1147 		/* move ring pointer to next.*/
1148 		ring_ptr_move_fw(ring, next_to_use);
1149 
1150 		desc_cb = &ring->desc_cb[ring->next_to_use];
1151 		desc = &ring->desc[ring->next_to_use];
1152 	}
1153 
1154 	return 0;
1155 }
1156 
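/* hns3_nic_maybe_stop_tso - TSO variant of the pre-xmit check: count the BDs
 * the skb needs, reject a single fragment needing more than
 * HNS3_MAX_BD_PER_FRAG BDs, copy an over-long skb into a linear one, and
 * return -EBUSY when the ring does not have enough free descriptors.
 */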
1157 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
1158 				   struct hns3_enet_ring *ring)
1159 {
1160 	struct sk_buff *skb = *out_skb;
1161 	struct sk_buff *new_skb = NULL;
1162 	struct skb_frag_struct *frag;
1163 	int bdnum_for_frag;
1164 	int frag_num;
1165 	int buf_num;
1166 	int size;
1167 	int i;
1168 
1169 	size = skb_headlen(skb);
1170 	buf_num = hns3_tx_bd_count(size);
1171 
1172 	frag_num = skb_shinfo(skb)->nr_frags;
1173 	for (i = 0; i < frag_num; i++) {
1174 		frag = &skb_shinfo(skb)->frags[i];
1175 		size = skb_frag_size(frag);
1176 		bdnum_for_frag = hns3_tx_bd_count(size);
1177 		if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
1178 			return -ENOMEM;
1179 
1180 		buf_num += bdnum_for_frag;
1181 	}
1182 
1183 	if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
1184 		buf_num = hns3_tx_bd_count(skb->len);
1185 		if (ring_space(ring) < buf_num)
1186 			return -EBUSY;
1187 		/* manually split the send packet */
1188 		new_skb = skb_copy(skb, GFP_ATOMIC);
1189 		if (!new_skb)
1190 			return -ENOMEM;
1191 		dev_kfree_skb_any(skb);
1192 		*out_skb = new_skb;
1193 	}
1194 
1195 	if (unlikely(ring_space(ring) < buf_num))
1196 		return -EBUSY;
1197 
1198 	*bnum = buf_num;
1199 	return 0;
1200 }
1201 
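/* hns3_nic_maybe_stop_tx - non-TSO pre-xmit check: one BD per fragment plus
 * one for the head; an skb with too many fragments is copied into a linear
 * skb, and -EBUSY is returned when the ring lacks room for it.
 */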
1202 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
1203 				  struct hns3_enet_ring *ring)
1204 {
1205 	struct sk_buff *skb = *out_skb;
1206 	struct sk_buff *new_skb = NULL;
1207 	int buf_num;
1208 
1209 	/* No. of segments (plus a header) */
1210 	buf_num = skb_shinfo(skb)->nr_frags + 1;
1211 
1212 	if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
1213 		buf_num = hns3_tx_bd_count(skb->len);
1214 		if (ring_space(ring) < buf_num)
1215 			return -EBUSY;
1216 		/* manually split the send packet */
1217 		new_skb = skb_copy(skb, GFP_ATOMIC);
1218 		if (!new_skb)
1219 			return -ENOMEM;
1220 		dev_kfree_skb_any(skb);
1221 		*out_skb = new_skb;
1222 	}
1223 
1224 	if (unlikely(ring_space(ring) < buf_num))
1225 		return -EBUSY;
1226 
1227 	*bnum = buf_num;
1228 
1229 	return 0;
1230 }
1231 
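/* hns3_clear_desc - roll back a partially built tx BD chain after a fill or
 * mapping error: unmap every BD filled since @next_to_use_orig and move
 * next_to_use back to that position.
 */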
1232 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1233 {
1234 	struct device *dev = ring_to_dev(ring);
1235 	unsigned int i;
1236 
1237 	for (i = 0; i < ring->desc_num; i++) {
1238 		/* check if this is where we started */
1239 		if (ring->next_to_use == next_to_use_orig)
1240 			break;
1241 
1242 		/* unmap the descriptor dma address */
1243 		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1244 			dma_unmap_single(dev,
1245 					 ring->desc_cb[ring->next_to_use].dma,
1246 					ring->desc_cb[ring->next_to_use].length,
1247 					DMA_TO_DEVICE);
1248 		else if (ring->desc_cb[ring->next_to_use].length)
1249 			dma_unmap_page(dev,
1250 				       ring->desc_cb[ring->next_to_use].dma,
1251 				       ring->desc_cb[ring->next_to_use].length,
1252 				       DMA_TO_DEVICE);
1253 
1254 		ring->desc_cb[ring->next_to_use].length = 0;
1255 
1256 		/* rollback one */
1257 		ring_ptr_move_bw(ring, next_to_use);
1258 	}
1259 }
1260 
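/* hns3_nic_net_xmit - .ndo_start_xmit handler: make sure enough BDs are
 * available (possibly linearizing the skb), fill one BD for the head and one
 * per fragment, then ring the TQP doorbell. On -EBUSY the subqueue is
 * stopped and NETDEV_TX_BUSY is returned so the stack will retry the skb.
 */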
1261 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1262 {
1263 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1264 	struct hns3_nic_ring_data *ring_data =
1265 		&tx_ring_data(priv, skb->queue_mapping);
1266 	struct hns3_enet_ring *ring = ring_data->ring;
1267 	struct netdev_queue *dev_queue;
1268 	struct skb_frag_struct *frag;
1269 	int next_to_use_head;
1270 	int next_to_use_frag;
1271 	int buf_num;
1272 	int seg_num;
1273 	int size;
1274 	int ret;
1275 	int i;
1276 
1277 	/* Prefetch the data used later */
1278 	prefetch(skb->data);
1279 
1280 	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
1281 	case -EBUSY:
1282 		u64_stats_update_begin(&ring->syncp);
1283 		ring->stats.tx_busy++;
1284 		u64_stats_update_end(&ring->syncp);
1285 
1286 		goto out_net_tx_busy;
1287 	case -ENOMEM:
1288 		u64_stats_update_begin(&ring->syncp);
1289 		ring->stats.sw_err_cnt++;
1290 		u64_stats_update_end(&ring->syncp);
1291 		netdev_err(netdev, "no memory to xmit!\n");
1292 
1293 		goto out_err_tx_ok;
1294 	default:
1295 		break;
1296 	}
1297 
1298 	/* No. of segments (plus a header) */
1299 	seg_num = skb_shinfo(skb)->nr_frags + 1;
1300 	/* Fill the first part */
1301 	size = skb_headlen(skb);
1302 
1303 	next_to_use_head = ring->next_to_use;
1304 
1305 	ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
1306 			     DESC_TYPE_SKB);
1307 	if (unlikely(ret))
1308 		goto head_fill_err;
1309 
1310 	next_to_use_frag = ring->next_to_use;
1311 	/* Fill the fragments */
1312 	for (i = 1; i < seg_num; i++) {
1313 		frag = &skb_shinfo(skb)->frags[i - 1];
1314 		size = skb_frag_size(frag);
1315 
1316 		ret = hns3_fill_desc(ring, frag, size,
1317 				     seg_num - 1 == i ? 1 : 0,
1318 				     DESC_TYPE_PAGE);
1319 
1320 		if (unlikely(ret))
1321 			goto frag_fill_err;
1322 	}
1323 
1324 	/* all BDs are filled, account the bytes and ring the doorbell */
1325 	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1326 	netdev_tx_sent_queue(dev_queue, skb->len);
1327 
1328 	wmb(); /* Commit all data before submit */
1329 
1330 	hnae3_queue_xmit(ring->tqp, buf_num);
1331 
1332 	return NETDEV_TX_OK;
1333 
1334 frag_fill_err:
1335 	hns3_clear_desc(ring, next_to_use_frag);
1336 
1337 head_fill_err:
1338 	hns3_clear_desc(ring, next_to_use_head);
1339 
1340 out_err_tx_ok:
1341 	dev_kfree_skb_any(skb);
1342 	return NETDEV_TX_OK;
1343 
1344 out_net_tx_busy:
1345 	netif_stop_subqueue(netdev, ring_data->queue_index);
1346 	smp_mb(); /* Commit all data before submit */
1347 
1348 	return NETDEV_TX_BUSY;
1349 }
1350 
1351 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1352 {
1353 	struct hnae3_handle *h = hns3_get_handle(netdev);
1354 	struct sockaddr *mac_addr = p;
1355 	int ret;
1356 
1357 	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1358 		return -EADDRNOTAVAIL;
1359 
1360 	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1361 		netdev_info(netdev, "already using mac address %pM\n",
1362 			    mac_addr->sa_data);
1363 		return 0;
1364 	}
1365 
1366 	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1367 	if (ret) {
1368 		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1369 		return ret;
1370 	}
1371 
1372 	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1373 
1374 	return 0;
1375 }
1376 
1377 static int hns3_nic_do_ioctl(struct net_device *netdev,
1378 			     struct ifreq *ifr, int cmd)
1379 {
1380 	struct hnae3_handle *h = hns3_get_handle(netdev);
1381 
1382 	if (!netif_running(netdev))
1383 		return -EINVAL;
1384 
1385 	if (!h->ae_algo->ops->do_ioctl)
1386 		return -EOPNOTSUPP;
1387 
1388 	return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
1389 }
1390 
1391 static int hns3_nic_set_features(struct net_device *netdev,
1392 				 netdev_features_t features)
1393 {
1394 	netdev_features_t changed = netdev->features ^ features;
1395 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1396 	struct hnae3_handle *h = priv->ae_handle;
1397 	bool enable;
1398 	int ret;
1399 
1400 	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1401 		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
1402 			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1403 		else
1404 			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1405 	}
1406 
1407 	if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
1408 		enable = !!(features & NETIF_F_GRO_HW);
1409 		ret = h->ae_algo->ops->set_gro_en(h, enable);
1410 		if (ret)
1411 			return ret;
1412 	}
1413 
1414 	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1415 	    h->ae_algo->ops->enable_vlan_filter) {
1416 		enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
1417 		h->ae_algo->ops->enable_vlan_filter(h, enable);
1418 	}
1419 
1420 	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1421 	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
1422 		enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1423 		ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
1424 		if (ret)
1425 			return ret;
1426 	}
1427 
1428 	if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
1429 		enable = !!(features & NETIF_F_NTUPLE);
1430 		h->ae_algo->ops->enable_fd(h, enable);
1431 	}
1432 
1433 	netdev->features = features;
1434 	return 0;
1435 }
1436 
1437 static void hns3_nic_get_stats64(struct net_device *netdev,
1438 				 struct rtnl_link_stats64 *stats)
1439 {
1440 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1441 	int queue_num = priv->ae_handle->kinfo.num_tqps;
1442 	struct hnae3_handle *handle = priv->ae_handle;
1443 	struct hns3_enet_ring *ring;
1444 	u64 rx_length_errors = 0;
1445 	u64 rx_crc_errors = 0;
1446 	u64 rx_multicast = 0;
1447 	unsigned int start;
1448 	u64 tx_errors = 0;
1449 	u64 rx_errors = 0;
1450 	unsigned int idx;
1451 	u64 tx_bytes = 0;
1452 	u64 rx_bytes = 0;
1453 	u64 tx_pkts = 0;
1454 	u64 rx_pkts = 0;
1455 	u64 tx_drop = 0;
1456 	u64 rx_drop = 0;
1457 
1458 	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1459 		return;
1460 
1461 	handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1462 
1463 	for (idx = 0; idx < queue_num; idx++) {
1464 		/* fetch the tx stats */
1465 		ring = priv->ring_data[idx].ring;
1466 		do {
1467 			start = u64_stats_fetch_begin_irq(&ring->syncp);
1468 			tx_bytes += ring->stats.tx_bytes;
1469 			tx_pkts += ring->stats.tx_pkts;
1470 			tx_drop += ring->stats.sw_err_cnt;
1471 			tx_errors += ring->stats.sw_err_cnt;
1472 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1473 
1474 		/* fetch the rx stats */
1475 		ring = priv->ring_data[idx + queue_num].ring;
1476 		do {
1477 			start = u64_stats_fetch_begin_irq(&ring->syncp);
1478 			rx_bytes += ring->stats.rx_bytes;
1479 			rx_pkts += ring->stats.rx_pkts;
1480 			rx_drop += ring->stats.non_vld_descs;
1481 			rx_drop += ring->stats.l2_err;
1482 			rx_errors += ring->stats.non_vld_descs;
1483 			rx_errors += ring->stats.l2_err;
1484 			rx_crc_errors += ring->stats.l2_err;
1485 			rx_crc_errors += ring->stats.l3l4_csum_err;
1486 			rx_multicast += ring->stats.rx_multicast;
1487 			rx_length_errors += ring->stats.err_pkt_len;
1488 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1489 	}
1490 
1491 	stats->tx_bytes = tx_bytes;
1492 	stats->tx_packets = tx_pkts;
1493 	stats->rx_bytes = rx_bytes;
1494 	stats->rx_packets = rx_pkts;
1495 
1496 	stats->rx_errors = rx_errors;
1497 	stats->multicast = rx_multicast;
1498 	stats->rx_length_errors = rx_length_errors;
1499 	stats->rx_crc_errors = rx_crc_errors;
1500 	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1501 
1502 	stats->tx_errors = tx_errors;
1503 	stats->rx_dropped = rx_drop;
1504 	stats->tx_dropped = tx_drop;
1505 	stats->collisions = netdev->stats.collisions;
1506 	stats->rx_over_errors = netdev->stats.rx_over_errors;
1507 	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1508 	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1509 	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1510 	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1511 	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1512 	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1513 	stats->tx_window_errors = netdev->stats.tx_window_errors;
1514 	stats->rx_compressed = netdev->stats.rx_compressed;
1515 	stats->tx_compressed = netdev->stats.tx_compressed;
1516 }
1517 
1518 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1519 {
1520 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1521 	struct hnae3_handle *h = hns3_get_handle(netdev);
1522 	struct hnae3_knic_private_info *kinfo = &h->kinfo;
1523 	u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1524 	u8 tc = mqprio_qopt->qopt.num_tc;
1525 	u16 mode = mqprio_qopt->mode;
1526 	u8 hw = mqprio_qopt->qopt.hw;
1527 
1528 	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1529 	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1530 		return -EOPNOTSUPP;
1531 
1532 	if (tc > HNAE3_MAX_TC)
1533 		return -EINVAL;
1534 
1535 	if (!netdev)
1536 		return -EINVAL;
1537 
1538 	return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1539 		kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1540 }
1541 
1542 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1543 			     void *type_data)
1544 {
1545 	if (type != TC_SETUP_QDISC_MQPRIO)
1546 		return -EOPNOTSUPP;
1547 
1548 	return hns3_setup_tc(dev, type_data);
1549 }
1550 
1551 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1552 				__be16 proto, u16 vid)
1553 {
1554 	struct hnae3_handle *h = hns3_get_handle(netdev);
1555 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1556 	int ret = -EIO;
1557 
1558 	if (h->ae_algo->ops->set_vlan_filter)
1559 		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1560 
1561 	if (!ret)
1562 		set_bit(vid, priv->active_vlans);
1563 
1564 	return ret;
1565 }
1566 
1567 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1568 				 __be16 proto, u16 vid)
1569 {
1570 	struct hnae3_handle *h = hns3_get_handle(netdev);
1571 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1572 	int ret = -EIO;
1573 
1574 	if (h->ae_algo->ops->set_vlan_filter)
1575 		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1576 
1577 	if (!ret)
1578 		clear_bit(vid, priv->active_vlans);
1579 
1580 	return ret;
1581 }
1582 
1583 static int hns3_restore_vlan(struct net_device *netdev)
1584 {
1585 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1586 	int ret = 0;
1587 	u16 vid;
1588 
1589 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1590 		ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1591 		if (ret) {
1592 			netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
1593 				   vid, ret);
1594 			return ret;
1595 		}
1596 	}
1597 
1598 	return ret;
1599 }
1600 
1601 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1602 				u8 qos, __be16 vlan_proto)
1603 {
1604 	struct hnae3_handle *h = hns3_get_handle(netdev);
1605 	int ret = -EIO;
1606 
1607 	if (h->ae_algo->ops->set_vf_vlan_filter)
1608 		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1609 						   qos, vlan_proto);
1610 
1611 	return ret;
1612 }
1613 
1614 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1615 {
1616 	struct hnae3_handle *h = hns3_get_handle(netdev);
1617 	int ret;
1618 
1619 	if (hns3_nic_resetting(netdev))
1620 		return -EBUSY;
1621 
1622 	if (!h->ae_algo->ops->set_mtu)
1623 		return -EOPNOTSUPP;
1624 
1625 	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1626 	if (ret)
1627 		netdev_err(netdev, "failed to change MTU in hardware %d\n",
1628 			   ret);
1629 	else
1630 		netdev->mtu = new_mtu;
1631 
1632 	return ret;
1633 }
1634 
1635 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1636 {
1637 	struct hns3_nic_priv *priv = netdev_priv(ndev);
1638 	struct hnae3_handle *h = hns3_get_handle(ndev);
1639 	struct hns3_enet_ring *tx_ring = NULL;
1640 	struct napi_struct *napi;
1641 	int timeout_queue = 0;
1642 	int hw_head, hw_tail;
1643 	int fbd_num, fbd_oft;
1644 	int ebd_num, ebd_oft;
1645 	int bd_num, bd_err;
1646 	int ring_en, tc;
1647 	int i;
1648 
1649 	/* Find the stopped queue the same way the stack does */
1650 	for (i = 0; i < ndev->num_tx_queues; i++) {
1651 		struct netdev_queue *q;
1652 		unsigned long trans_start;
1653 
1654 		q = netdev_get_tx_queue(ndev, i);
1655 		trans_start = q->trans_start;
1656 		if (netif_xmit_stopped(q) &&
1657 		    time_after(jiffies,
1658 			       (trans_start + ndev->watchdog_timeo))) {
1659 			timeout_queue = i;
1660 			break;
1661 		}
1662 	}
1663 
1664 	if (i == ndev->num_tx_queues) {
1665 		netdev_info(ndev,
1666 			    "no netdev TX timeout queue found, timeout count: %llu\n",
1667 			    priv->tx_timeout_count);
1668 		return false;
1669 	}
1670 
1671 	priv->tx_timeout_count++;
1672 
1673 	tx_ring = priv->ring_data[timeout_queue].ring;
1674 	napi = &tx_ring->tqp_vector->napi;
1675 
1676 	netdev_info(ndev,
1677 		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
1678 		    priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
1679 		    tx_ring->next_to_clean, napi->state);
1680 
1681 	netdev_info(ndev,
1682 		    "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
1683 		    tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
1684 		    tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
1685 
1686 	netdev_info(ndev,
1687 		    "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
1688 		    tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
1689 		    tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
1690 
1691 	/* When the mac receives many continuous pause frames, it is unable to
1692 	 * send packets, which may cause a tx timeout
1693 	 */
1694 	if (h->ae_algo->ops->update_stats &&
1695 	    h->ae_algo->ops->get_mac_pause_stats) {
1696 		u64 tx_pause_cnt, rx_pause_cnt;
1697 
1698 		h->ae_algo->ops->update_stats(h, &ndev->stats);
1699 		h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
1700 						     &rx_pause_cnt);
1701 		netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
1702 			    tx_pause_cnt, rx_pause_cnt);
1703 	}
1704 
1705 	hw_head = readl_relaxed(tx_ring->tqp->io_base +
1706 				HNS3_RING_TX_RING_HEAD_REG);
1707 	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1708 				HNS3_RING_TX_RING_TAIL_REG);
1709 	fbd_num = readl_relaxed(tx_ring->tqp->io_base +
1710 				HNS3_RING_TX_RING_FBDNUM_REG);
1711 	fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
1712 				HNS3_RING_TX_RING_OFFSET_REG);
1713 	ebd_num = readl_relaxed(tx_ring->tqp->io_base +
1714 				HNS3_RING_TX_RING_EBDNUM_REG);
1715 	ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
1716 				HNS3_RING_TX_RING_EBD_OFFSET_REG);
1717 	bd_num = readl_relaxed(tx_ring->tqp->io_base +
1718 			       HNS3_RING_TX_RING_BD_NUM_REG);
1719 	bd_err = readl_relaxed(tx_ring->tqp->io_base +
1720 			       HNS3_RING_TX_RING_BD_ERR_REG);
1721 	ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
1722 	tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
1723 
1724 	netdev_info(ndev,
1725 		    "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
1726 		    bd_num, hw_head, hw_tail, bd_err,
1727 		    readl(tx_ring->tqp_vector->mask_addr));
1728 	netdev_info(ndev,
1729 		    "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
1730 		    ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
1731 
1732 	return true;
1733 }
1734 
1735 static void hns3_nic_net_timeout(struct net_device *ndev)
1736 {
1737 	struct hns3_nic_priv *priv = netdev_priv(ndev);
1738 	struct hnae3_handle *h = priv->ae_handle;
1739 
1740 	if (!hns3_get_tx_timeo_queue_info(ndev))
1741 		return;
1742 
1743 	/* request the reset, and let hclge determine
1744 	 * which reset level should be done
1745 	 */
1746 	if (h->ae_algo->ops->reset_event)
1747 		h->ae_algo->ops->reset_event(h->pdev, h);
1748 }
1749 
1750 static const struct net_device_ops hns3_nic_netdev_ops = {
1751 	.ndo_open		= hns3_nic_net_open,
1752 	.ndo_stop		= hns3_nic_net_stop,
1753 	.ndo_start_xmit		= hns3_nic_net_xmit,
1754 	.ndo_tx_timeout		= hns3_nic_net_timeout,
1755 	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
1756 	.ndo_do_ioctl		= hns3_nic_do_ioctl,
1757 	.ndo_change_mtu		= hns3_nic_change_mtu,
1758 	.ndo_set_features	= hns3_nic_set_features,
1759 	.ndo_get_stats64	= hns3_nic_get_stats64,
1760 	.ndo_setup_tc		= hns3_nic_setup_tc,
1761 	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
1762 	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
1763 	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
1764 	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
1765 };
1766 
1767 bool hns3_is_phys_func(struct pci_dev *pdev)
1768 {
1769 	u32 dev_id = pdev->device;
1770 
1771 	switch (dev_id) {
1772 	case HNAE3_DEV_ID_GE:
1773 	case HNAE3_DEV_ID_25GE:
1774 	case HNAE3_DEV_ID_25GE_RDMA:
1775 	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1776 	case HNAE3_DEV_ID_50GE_RDMA:
1777 	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1778 	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1779 		return true;
1780 	case HNAE3_DEV_ID_100G_VF:
1781 	case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1782 		return false;
1783 	default:
1784 		dev_warn(&pdev->dev, "un-recognized pci device-id %d\n",
1785 			 dev_id);
1786 	}
1787 
1788 	return false;
1789 }
1790 
1791 static void hns3_disable_sriov(struct pci_dev *pdev)
1792 {
1793 	/* If our VFs are assigned we cannot shut down SR-IOV
1794 	 * without causing issues, so just leave the hardware
1795 	 * available but disabled
1796 	 */
1797 	if (pci_vfs_assigned(pdev)) {
1798 		dev_warn(&pdev->dev,
1799 			 "disabling driver while VFs are assigned\n");
1800 		return;
1801 	}
1802 
1803 	pci_disable_sriov(pdev);
1804 }
1805 
1806 static void hns3_get_dev_capability(struct pci_dev *pdev,
1807 				    struct hnae3_ae_dev *ae_dev)
1808 {
1809 	if (pdev->revision >= 0x21) {
1810 		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
1811 		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
1812 	}
1813 }
1814 
1815 /* hns3_probe - Device initialization routine
1816  * @pdev: PCI device information struct
1817  * @ent: entry in hns3_pci_tbl
1818  *
1819  * hns3_probe initializes a PF identified by a pci_dev structure.
1820  * The OS initialization, configuring of the PF private structure,
1821  * and a hardware reset occur.
1822  *
1823  * Returns 0 on success, negative on failure
1824  */
1825 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1826 {
1827 	struct hnae3_ae_dev *ae_dev;
1828 	int ret;
1829 
1830 	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1831 			      GFP_KERNEL);
1832 	if (!ae_dev) {
1833 		ret = -ENOMEM;
1834 		return ret;
1835 	}
1836 
1837 	ae_dev->pdev = pdev;
1838 	ae_dev->flag = ent->driver_data;
1839 	ae_dev->dev_type = HNAE3_DEV_KNIC;
1840 	ae_dev->reset_type = HNAE3_NONE_RESET;
1841 	hns3_get_dev_capability(pdev, ae_dev);
1842 	pci_set_drvdata(pdev, ae_dev);
1843 
1844 	ret = hnae3_register_ae_dev(ae_dev);
1845 	if (ret) {
1846 		devm_kfree(&pdev->dev, ae_dev);
1847 		pci_set_drvdata(pdev, NULL);
1848 	}
1849 
1850 	return ret;
1851 }
1852 
1853 /* hns3_remove - Device removal routine
1854  * @pdev: PCI device information struct
1855  */
1856 static void hns3_remove(struct pci_dev *pdev)
1857 {
1858 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1859 
1860 	if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1861 		hns3_disable_sriov(pdev);
1862 
1863 	hnae3_unregister_ae_dev(ae_dev);
1864 	pci_set_drvdata(pdev, NULL);
1865 }
1866 
1867 /**
1868  * hns3_pci_sriov_configure
1869  * @pdev: pointer to a pci_dev structure
1870  * @num_vfs: number of VFs to allocate
1871  *
1872  * Enable or change the number of VFs. Called when the user updates the number
1873  * of VFs in sysfs.
1874  **/
1875 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1876 {
1877 	int ret;
1878 
1879 	if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
		dev_warn(&pdev->dev, "Cannot configure SR-IOV\n");
1881 		return -EINVAL;
1882 	}
1883 
1884 	if (num_vfs) {
1885 		ret = pci_enable_sriov(pdev, num_vfs);
1886 		if (ret)
1887 			dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1888 		else
1889 			return num_vfs;
1890 	} else if (!pci_vfs_assigned(pdev)) {
1891 		pci_disable_sriov(pdev);
1892 	} else {
1893 		dev_warn(&pdev->dev,
1894 			 "Unable to free VFs because some are assigned to VMs.\n");
1895 	}
1896 
1897 	return 0;
1898 }
1899 
1900 static void hns3_shutdown(struct pci_dev *pdev)
1901 {
1902 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1903 
1904 	hnae3_unregister_ae_dev(ae_dev);
1905 	devm_kfree(&pdev->dev, ae_dev);
1906 	pci_set_drvdata(pdev, NULL);
1907 
1908 	if (system_state == SYSTEM_POWER_OFF)
1909 		pci_set_power_state(pdev, PCI_D3hot);
1910 }
1911 
1912 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
1913 					    pci_channel_state_t state)
1914 {
1915 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1916 	pci_ers_result_t ret;
1917 
	dev_info(&pdev->dev, "PCI error detected, state = %d\n", state);
1919 
1920 	if (state == pci_channel_io_perm_failure)
1921 		return PCI_ERS_RESULT_DISCONNECT;
1922 
1923 	if (!ae_dev) {
1924 		dev_err(&pdev->dev,
1925 			"Can't recover - error happened during device init\n");
1926 		return PCI_ERS_RESULT_NONE;
1927 	}
1928 
1929 	if (ae_dev->ops->handle_hw_ras_error)
1930 		ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
1931 	else
1932 		return PCI_ERS_RESULT_NONE;
1933 
1934 	return ret;
1935 }
1936 
1937 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
1938 {
1939 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1940 	struct device *dev = &pdev->dev;
1941 
1942 	dev_info(dev, "requesting reset due to PCI error\n");
1943 
1944 	/* request the reset */
1945 	if (ae_dev->ops->reset_event) {
1946 		if (!ae_dev->override_pci_need_reset)
1947 			ae_dev->ops->reset_event(pdev, NULL);
1948 
1949 		return PCI_ERS_RESULT_RECOVERED;
1950 	}
1951 
1952 	return PCI_ERS_RESULT_DISCONNECT;
1953 }
1954 
1955 static void hns3_reset_prepare(struct pci_dev *pdev)
1956 {
1957 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1958 
1959 	dev_info(&pdev->dev, "hns3 flr prepare\n");
1960 	if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
1961 		ae_dev->ops->flr_prepare(ae_dev);
1962 }
1963 
1964 static void hns3_reset_done(struct pci_dev *pdev)
1965 {
1966 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1967 
1968 	dev_info(&pdev->dev, "hns3 flr done\n");
1969 	if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
1970 		ae_dev->ops->flr_done(ae_dev);
1971 }
1972 
1973 static const struct pci_error_handlers hns3_err_handler = {
1974 	.error_detected = hns3_error_detected,
1975 	.slot_reset     = hns3_slot_reset,
1976 	.reset_prepare	= hns3_reset_prepare,
1977 	.reset_done	= hns3_reset_done,
1978 };
1979 
1980 static struct pci_driver hns3_driver = {
1981 	.name     = hns3_driver_name,
1982 	.id_table = hns3_pci_tbl,
1983 	.probe    = hns3_probe,
1984 	.remove   = hns3_remove,
1985 	.shutdown = hns3_shutdown,
1986 	.sriov_configure = hns3_pci_sriov_configure,
1987 	.err_handler    = &hns3_err_handler,
1988 };
1989 
/* set the default features for the hns3 netdev */
1991 static void hns3_set_default_feature(struct net_device *netdev)
1992 {
1993 	struct hnae3_handle *h = hns3_get_handle(netdev);
1994 	struct pci_dev *pdev = h->pdev;
1995 
1996 	netdev->priv_flags |= IFF_UNICAST_FLT;
1997 
1998 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1999 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2000 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2001 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2002 		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2003 
2004 	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
2005 
2006 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
2007 
2008 	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2009 		NETIF_F_HW_VLAN_CTAG_FILTER |
2010 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2011 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2012 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2013 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2014 		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2015 
2016 	netdev->vlan_features |=
2017 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2018 		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
2019 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2020 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2021 		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2022 
2023 	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2024 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2025 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2026 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2027 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2028 		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2029 
2030 	if (pdev->revision >= 0x21) {
2031 		netdev->hw_features |= NETIF_F_GRO_HW;
2032 		netdev->features |= NETIF_F_GRO_HW;
2033 
2034 		if (!(h->flags & HNAE3_SUPPORT_VF)) {
2035 			netdev->hw_features |= NETIF_F_NTUPLE;
2036 			netdev->features |= NETIF_F_NTUPLE;
2037 		}
2038 	}
2039 }
2040 
2041 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
2042 			     struct hns3_desc_cb *cb)
2043 {
2044 	unsigned int order = hnae3_page_order(ring);
2045 	struct page *p;
2046 
2047 	p = dev_alloc_pages(order);
2048 	if (!p)
2049 		return -ENOMEM;
2050 
2051 	cb->priv = p;
2052 	cb->page_offset = 0;
2053 	cb->reuse_flag = 0;
2054 	cb->buf  = page_address(p);
2055 	cb->length = hnae3_page_size(ring);
2056 	cb->type = DESC_TYPE_PAGE;
2057 
2058 	return 0;
2059 }
2060 
2061 static void hns3_free_buffer(struct hns3_enet_ring *ring,
2062 			     struct hns3_desc_cb *cb)
2063 {
2064 	if (cb->type == DESC_TYPE_SKB)
2065 		dev_kfree_skb_any((struct sk_buff *)cb->priv);
2066 	else if (!HNAE3_IS_TX_RING(ring))
2067 		put_page((struct page *)cb->priv);
2068 	memset(cb, 0, sizeof(*cb));
2069 }
2070 
2071 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
2072 {
2073 	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
2074 			       cb->length, ring_to_dma_dir(ring));
2075 
2076 	if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
2077 		return -EIO;
2078 
2079 	return 0;
2080 }
2081 
2082 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
2083 			      struct hns3_desc_cb *cb)
2084 {
2085 	if (cb->type == DESC_TYPE_SKB)
2086 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
2087 				 ring_to_dma_dir(ring));
2088 	else if (cb->length)
2089 		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
2090 			       ring_to_dma_dir(ring));
2091 }
2092 
2093 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
2094 {
2095 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2096 	ring->desc[i].addr = 0;
2097 }
2098 
2099 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
2100 {
2101 	struct hns3_desc_cb *cb = &ring->desc_cb[i];
2102 
2103 	if (!ring->desc_cb[i].dma)
2104 		return;
2105 
2106 	hns3_buffer_detach(ring, i);
2107 	hns3_free_buffer(ring, cb);
2108 }
2109 
2110 static void hns3_free_buffers(struct hns3_enet_ring *ring)
2111 {
2112 	int i;
2113 
2114 	for (i = 0; i < ring->desc_num; i++)
2115 		hns3_free_buffer_detach(ring, i);
2116 }
2117 
2118 /* free desc along with its attached buffer */
2119 static void hns3_free_desc(struct hns3_enet_ring *ring)
2120 {
2121 	int size = ring->desc_num * sizeof(ring->desc[0]);
2122 
2123 	hns3_free_buffers(ring);
2124 
2125 	if (ring->desc) {
2126 		dma_free_coherent(ring_to_dev(ring), size,
2127 				  ring->desc, ring->desc_dma_addr);
2128 		ring->desc = NULL;
2129 	}
2130 }
2131 
2132 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
2133 {
2134 	int size = ring->desc_num * sizeof(ring->desc[0]);
2135 
2136 	ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
2137 					&ring->desc_dma_addr, GFP_KERNEL);
2138 	if (!ring->desc)
2139 		return -ENOMEM;
2140 
2141 	return 0;
2142 }
2143 
2144 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
2145 				   struct hns3_desc_cb *cb)
2146 {
2147 	int ret;
2148 
2149 	ret = hns3_alloc_buffer(ring, cb);
2150 	if (ret)
2151 		goto out;
2152 
2153 	ret = hns3_map_buffer(ring, cb);
2154 	if (ret)
2155 		goto out_with_buf;
2156 
2157 	return 0;
2158 
2159 out_with_buf:
2160 	hns3_free_buffer(ring, cb);
2161 out:
2162 	return ret;
2163 }
2164 
2165 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
2166 {
2167 	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
2168 
2169 	if (ret)
2170 		return ret;
2171 
2172 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2173 
2174 	return 0;
2175 }
2176 
/* Allocate buffer memory for each descriptor of the ring and map it for DMA */
2178 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
2179 {
2180 	int i, j, ret;
2181 
2182 	for (i = 0; i < ring->desc_num; i++) {
2183 		ret = hns3_alloc_buffer_attach(ring, i);
2184 		if (ret)
2185 			goto out_buffer_fail;
2186 	}
2187 
2188 	return 0;
2189 
2190 out_buffer_fail:
2191 	for (j = i - 1; j >= 0; j--)
2192 		hns3_free_buffer_detach(ring, j);
2193 	return ret;
2194 }
2195 
/* detach an in-use buffer and replace it with a reserved one */
2197 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2198 				struct hns3_desc_cb *res_cb)
2199 {
2200 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2201 	ring->desc_cb[i] = *res_cb;
2202 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2203 	ring->desc[i].rx.bd_base_info = 0;
2204 }
2205 
2206 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2207 {
2208 	ring->desc_cb[i].reuse_flag = 0;
2209 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
2210 		+ ring->desc_cb[i].page_offset);
2211 	ring->desc[i].rx.bd_base_info = 0;
2212 }
2213 
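/* Reclaim one completed TX descriptor: account its bytes and (for the SKB
 * type) its packet, free the attached buffer and advance next_to_clean.
 */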
2214 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
2215 				      int *pkts)
2216 {
2217 	int ntc = ring->next_to_clean;
2218 	struct hns3_desc_cb *desc_cb;
2219 
2220 	desc_cb = &ring->desc_cb[ntc];
2221 	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
2222 	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hns3_free_buffer_detach */
2224 	hns3_free_buffer_detach(ring, ntc);
2225 
2226 	if (++ntc == ring->desc_num)
2227 		ntc = 0;
2228 
2229 	/* This smp_store_release() pairs with smp_load_acquire() in
2230 	 * ring_space called by hns3_nic_net_xmit.
2231 	 */
2232 	smp_store_release(&ring->next_to_clean, ntc);
2233 }
2234 
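/* A head index reported by hardware is valid only if it lies in the ring
 * interval (next_to_clean, next_to_use], taking wrap-around into account.
 */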
2235 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
2236 {
2237 	int u = ring->next_to_use;
2238 	int c = ring->next_to_clean;
2239 
2240 	if (unlikely(h > ring->desc_num))
2241 		return 0;
2242 
2243 	return u > c ? (h > c && h <= u) : (h > c || h <= u);
2244 }
2245 
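/* Reclaim TX descriptors completed by hardware up to the head pointer,
 * update BQL and statistics, and wake the queue if it was stopped and
 * enough descriptors are free again.
 */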
2246 void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
2247 {
2248 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2249 	struct hns3_nic_priv *priv = netdev_priv(netdev);
2250 	struct netdev_queue *dev_queue;
2251 	int bytes, pkts;
2252 	int head;
2253 
2254 	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
2255 	rmb(); /* Make sure head is ready before touch any data */
2256 
2257 	if (is_ring_empty(ring) || head == ring->next_to_clean)
2258 		return; /* no data to poll */
2259 
2260 	if (unlikely(!is_valid_clean_head(ring, head))) {
2261 		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2262 			   ring->next_to_use, ring->next_to_clean);
2263 
2264 		u64_stats_update_begin(&ring->syncp);
2265 		ring->stats.io_err_cnt++;
2266 		u64_stats_update_end(&ring->syncp);
2267 		return;
2268 	}
2269 
2270 	bytes = 0;
2271 	pkts = 0;
2272 	while (head != ring->next_to_clean) {
2273 		hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
2274 		/* Issue prefetch for next Tx descriptor */
2275 		prefetch(&ring->desc_cb[ring->next_to_clean]);
2276 	}
2277 
2278 	ring->tqp_vector->tx_group.total_bytes += bytes;
2279 	ring->tqp_vector->tx_group.total_packets += pkts;
2280 
2281 	u64_stats_update_begin(&ring->syncp);
2282 	ring->stats.tx_bytes += bytes;
2283 	ring->stats.tx_pkts += pkts;
2284 	u64_stats_update_end(&ring->syncp);
2285 
2286 	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2287 	netdev_tx_completed_queue(dev_queue, pkts, bytes);
2288 
2289 	if (unlikely(pkts && netif_carrier_ok(netdev) &&
2290 		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2291 		/* Make sure that anybody stopping the queue after this
2292 		 * sees the new next_to_clean.
2293 		 */
2294 		smp_mb();
2295 		if (netif_tx_queue_stopped(dev_queue) &&
2296 		    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2297 			netif_tx_wake_queue(dev_queue);
2298 			ring->stats.restart_queue++;
2299 		}
2300 	}
2301 }
2302 
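/* Return the number of consumed RX descriptors waiting to be refilled,
 * i.e. those between next_to_use and next_to_clean.
 */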
2303 static int hns3_desc_unused(struct hns3_enet_ring *ring)
2304 {
2305 	int ntc = ring->next_to_clean;
2306 	int ntu = ring->next_to_use;
2307 
2308 	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2309 }
2310 
2311 static void
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
2313 {
2314 	struct hns3_desc_cb *desc_cb;
2315 	struct hns3_desc_cb res_cbs;
2316 	int i, ret;
2317 
	for (i = 0; i < cleaned_count; i++) {
2319 		desc_cb = &ring->desc_cb[ring->next_to_use];
2320 		if (desc_cb->reuse_flag) {
2321 			u64_stats_update_begin(&ring->syncp);
2322 			ring->stats.reuse_pg_cnt++;
2323 			u64_stats_update_end(&ring->syncp);
2324 
2325 			hns3_reuse_buffer(ring, ring->next_to_use);
2326 		} else {
2327 			ret = hns3_reserve_buffer_map(ring, &res_cbs);
2328 			if (ret) {
2329 				u64_stats_update_begin(&ring->syncp);
2330 				ring->stats.sw_err_cnt++;
2331 				u64_stats_update_end(&ring->syncp);
2332 
2333 				netdev_err(ring->tqp->handle->kinfo.netdev,
2334 					   "hnae reserve buffer map failed.\n");
2335 				break;
2336 			}
2337 			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2338 		}
2339 
2340 		ring_ptr_move_fw(ring, next_to_use);
2341 	}
2342 
	wmb(); /* Make sure all data has been written before the doorbell */
2344 	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2345 }
2346 
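/* Attach the RX buffer to the skb as a page fragment and decide whether
 * the page can be reused for a subsequent RX descriptor.
 */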
2347 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2348 				struct hns3_enet_ring *ring, int pull_len,
2349 				struct hns3_desc_cb *desc_cb)
2350 {
2351 	struct hns3_desc *desc;
2352 	u32 truesize;
2353 	int size;
2354 	int last_offset;
2355 	bool twobufs;
2356 
2357 	twobufs = ((PAGE_SIZE < 8192) &&
2358 		hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2359 
2360 	desc = &ring->desc[ring->next_to_clean];
2361 	size = le16_to_cpu(desc->rx.size);
2362 
2363 	truesize = hnae3_buf_size(ring);
2364 
2365 	if (!twobufs)
2366 		last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
2367 
2368 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2369 			size - pull_len, truesize);
2370 
	/* Avoid reusing pages from a remote NUMA node; flag as not reusable by default */
2372 	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2373 		return;
2374 
2375 	if (twobufs) {
2376 		/* If we are only owner of page we can reuse it */
2377 		if (likely(page_count(desc_cb->priv) == 1)) {
2378 			/* Flip page offset to other buffer */
2379 			desc_cb->page_offset ^= truesize;
2380 
2381 			desc_cb->reuse_flag = 1;
			/* bump the page refcount before it is handed to the stack */
2383 			get_page(desc_cb->priv);
2384 		}
2385 		return;
2386 	}
2387 
2388 	/* Move offset up to the next cache line */
2389 	desc_cb->page_offset += truesize;
2390 
2391 	if (desc_cb->page_offset <= last_offset) {
2392 		desc_cb->reuse_flag = 1;
		/* Bump the page refcount before it is handed to the stack */
2394 		get_page(desc_cb->priv);
2395 	}
2396 }
2397 
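/* Finish a HW-GRO merged skb: skip any VLAN headers, locate the TCP header,
 * fill in the GSO fields and mark the checksum as already verified.
 */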
2398 static int hns3_gro_complete(struct sk_buff *skb)
2399 {
2400 	__be16 type = skb->protocol;
2401 	struct tcphdr *th;
2402 	int depth = 0;
2403 
2404 	while (type == htons(ETH_P_8021Q)) {
2405 		struct vlan_hdr *vh;
2406 
2407 		if ((depth + VLAN_HLEN) > skb_headlen(skb))
2408 			return -EFAULT;
2409 
2410 		vh = (struct vlan_hdr *)(skb->data + depth);
2411 		type = vh->h_vlan_encapsulated_proto;
2412 		depth += VLAN_HLEN;
2413 	}
2414 
2415 	if (type == htons(ETH_P_IP)) {
2416 		depth += sizeof(struct iphdr);
2417 	} else if (type == htons(ETH_P_IPV6)) {
2418 		depth += sizeof(struct ipv6hdr);
2419 	} else {
2420 		netdev_err(skb->dev,
2421 			   "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
2422 			   be16_to_cpu(type), depth);
2423 		return -EFAULT;
2424 	}
2425 
2426 	th = (struct tcphdr *)(skb->data + depth);
2427 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
2428 	if (th->cwr)
2429 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
2430 
2431 	skb->ip_summed = CHECKSUM_UNNECESSARY;
2432 
2433 	return 0;
2434 }
2435 
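/* Set skb->ip_summed according to the checksum status reported in the RX
 * descriptor for the supported L3/L4 and tunnel combinations.
 */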
2436 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2437 			     u32 l234info, u32 bd_base_info)
2438 {
2439 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2440 	int l3_type, l4_type;
2441 	int ol4_type;
2442 
2443 	skb->ip_summed = CHECKSUM_NONE;
2444 
2445 	skb_checksum_none_assert(skb);
2446 
2447 	if (!(netdev->features & NETIF_F_RXCSUM))
2448 		return;
2449 
	/* check if the hardware has processed the checksum */
2451 	if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
2452 		return;
2453 
2454 	if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
2455 				 BIT(HNS3_RXD_OL3E_B) |
2456 				 BIT(HNS3_RXD_OL4E_B)))) {
2457 		u64_stats_update_begin(&ring->syncp);
2458 		ring->stats.l3l4_csum_err++;
2459 		u64_stats_update_end(&ring->syncp);
2460 
2461 		return;
2462 	}
2463 
2464 	ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
2465 				   HNS3_RXD_OL4ID_S);
2466 	switch (ol4_type) {
2467 	case HNS3_OL4_TYPE_MAC_IN_UDP:
2468 	case HNS3_OL4_TYPE_NVGRE:
2469 		skb->csum_level = 1;
2470 		/* fall through */
2471 	case HNS3_OL4_TYPE_NO_TUN:
2472 		l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2473 					  HNS3_RXD_L3ID_S);
2474 		l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2475 					  HNS3_RXD_L4ID_S);
2476 
2477 		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2478 		if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2479 		     l3_type == HNS3_L3_TYPE_IPV6) &&
2480 		    (l4_type == HNS3_L4_TYPE_UDP ||
2481 		     l4_type == HNS3_L4_TYPE_TCP ||
2482 		     l4_type == HNS3_L4_TYPE_SCTP))
2483 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2484 		break;
2485 	default:
2486 		break;
2487 	}
2488 }
2489 
2490 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2491 {
2492 	if (skb_has_frag_list(skb))
2493 		napi_gro_flush(&ring->tqp_vector->napi, false);
2494 
2495 	napi_gro_receive(&ring->tqp_vector->napi, skb);
2496 }
2497 
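/* Extract the stripped VLAN tag from the RX descriptor. Returns true and
 * fills *vlan_tag when a tag should be reported to the stack.
 */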
2498 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2499 				struct hns3_desc *desc, u32 l234info,
2500 				u16 *vlan_tag)
2501 {
2502 	struct hnae3_handle *handle = ring->tqp->handle;
2503 	struct pci_dev *pdev = ring->tqp->handle->pdev;
2504 
2505 	if (pdev->revision == 0x20) {
2506 		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2507 		if (!(*vlan_tag & VLAN_VID_MASK))
2508 			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2509 
2510 		return (*vlan_tag != 0);
2511 	}
2512 
2513 #define HNS3_STRP_OUTER_VLAN	0x1
2514 #define HNS3_STRP_INNER_VLAN	0x2
2515 #define HNS3_STRP_BOTH		0x3
2516 
	/* The hardware always writes the stripped VLAN tag into the RX
	 * descriptor, so the driver needs to determine which tag to
	 * report to the stack.
	 */
2521 	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2522 				HNS3_RXD_STRP_TAGP_S)) {
2523 	case HNS3_STRP_OUTER_VLAN:
2524 		if (handle->port_base_vlan_state !=
2525 				HNAE3_PORT_BASE_VLAN_DISABLE)
2526 			return false;
2527 
2528 		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2529 		return true;
2530 	case HNS3_STRP_INNER_VLAN:
2531 		if (handle->port_base_vlan_state !=
2532 				HNAE3_PORT_BASE_VLAN_DISABLE)
2533 			return false;
2534 
2535 		*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2536 		return true;
2537 	case HNS3_STRP_BOTH:
2538 		if (handle->port_base_vlan_state ==
2539 				HNAE3_PORT_BASE_VLAN_DISABLE)
2540 			*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2541 		else
2542 			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2543 
2544 		return true;
2545 	default:
2546 		return false;
2547 	}
2548 }
2549 
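/* Allocate an skb for the current RX BD. Small packets are copied entirely
 * into the linear area; for larger ones only the header is pulled in and
 * HNS3_NEED_ADD_FRAG is returned so the payload is attached as page frags.
 */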
2550 static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
2551 			  unsigned char *va)
2552 {
2553 #define HNS3_NEED_ADD_FRAG	1
2554 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
2555 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2556 	struct sk_buff *skb;
2557 
2558 	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
2559 	skb = ring->skb;
2560 	if (unlikely(!skb)) {
2561 		netdev_err(netdev, "alloc rx skb fail\n");
2562 
2563 		u64_stats_update_begin(&ring->syncp);
2564 		ring->stats.sw_err_cnt++;
2565 		u64_stats_update_end(&ring->syncp);
2566 
2567 		return -ENOMEM;
2568 	}
2569 
2570 	prefetchw(skb->data);
2571 
2572 	ring->pending_buf = 1;
2573 	ring->frag_num = 0;
2574 	ring->tail_skb = NULL;
2575 	if (length <= HNS3_RX_HEAD_SIZE) {
2576 		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2577 
2578 		/* We can reuse buffer as-is, just make sure it is local */
2579 		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2580 			desc_cb->reuse_flag = 1;
2581 		else /* This page cannot be reused so discard it */
2582 			put_page(desc_cb->priv);
2583 
2584 		ring_ptr_move_fw(ring, next_to_clean);
2585 		return 0;
2586 	}
2587 	u64_stats_update_begin(&ring->syncp);
2588 	ring->stats.seg_pkt_cnt++;
2589 	u64_stats_update_end(&ring->syncp);
2590 
2591 	ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
2592 	__skb_put(skb, ring->pull_len);
2593 	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
2594 			    desc_cb);
2595 	ring_ptr_move_fw(ring, next_to_clean);
2596 
2597 	return HNS3_NEED_ADD_FRAG;
2598 }
2599 
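/* Attach the remaining BDs of a multi-BD packet to the skb as page frags,
 * chaining additional skbs on the frag_list once MAX_SKB_FRAGS is reached.
 */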
2600 static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
2601 			 struct sk_buff **out_skb, bool pending)
2602 {
2603 	struct sk_buff *skb = *out_skb;
2604 	struct sk_buff *head_skb = *out_skb;
2605 	struct sk_buff *new_skb;
2606 	struct hns3_desc_cb *desc_cb;
2607 	struct hns3_desc *pre_desc;
2608 	u32 bd_base_info;
2609 	int pre_bd;
2610 
	/* If there is a pending BD, the SW counter next_to_clean has already
	 * moved past it, so the BD info must be taken from the previous
	 * descriptor.
	 */
2614 	if (pending) {
2615 		pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
2616 			ring->desc_num;
2617 		pre_desc = &ring->desc[pre_bd];
2618 		bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
2619 	} else {
2620 		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2621 	}
2622 
2623 	while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2624 		desc = &ring->desc[ring->next_to_clean];
2625 		desc_cb = &ring->desc_cb[ring->next_to_clean];
2626 		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
		/* make sure the HW has finished writing the descriptor */
2628 		dma_rmb();
2629 		if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
2630 			return -ENXIO;
2631 
2632 		if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
2633 			new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2634 						 HNS3_RX_HEAD_SIZE);
2635 			if (unlikely(!new_skb)) {
2636 				netdev_err(ring->tqp->handle->kinfo.netdev,
2637 					   "alloc rx skb frag fail\n");
2638 				return -ENXIO;
2639 			}
2640 			ring->frag_num = 0;
2641 
2642 			if (ring->tail_skb) {
2643 				ring->tail_skb->next = new_skb;
2644 				ring->tail_skb = new_skb;
2645 			} else {
2646 				skb_shinfo(skb)->frag_list = new_skb;
2647 				ring->tail_skb = new_skb;
2648 			}
2649 		}
2650 
2651 		if (ring->tail_skb) {
2652 			head_skb->truesize += hnae3_buf_size(ring);
2653 			head_skb->data_len += le16_to_cpu(desc->rx.size);
2654 			head_skb->len += le16_to_cpu(desc->rx.size);
2655 			skb = ring->tail_skb;
2656 		}
2657 
2658 		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
2659 		ring_ptr_move_fw(ring, next_to_clean);
2660 		ring->pending_buf++;
2661 	}
2662 
2663 	return 0;
2664 }
2665 
2666 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
2667 				     struct sk_buff *skb, u32 l234info,
2668 				     u32 bd_base_info)
2669 {
2670 	u16 gro_count;
2671 	u32 l3_type;
2672 
2673 	gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
2674 				    HNS3_RXD_GRO_COUNT_S);
2675 	/* if there is no HW GRO, do not set gro params */
2676 	if (!gro_count) {
2677 		hns3_rx_checksum(ring, skb, l234info, bd_base_info);
2678 		return 0;
2679 	}
2680 
2681 	NAPI_GRO_CB(skb)->count = gro_count;
2682 
2683 	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2684 				  HNS3_RXD_L3ID_S);
2685 	if (l3_type == HNS3_L3_TYPE_IPV4)
2686 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2687 	else if (l3_type == HNS3_L3_TYPE_IPV6)
2688 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2689 	else
2690 		return -EFAULT;
2691 
2692 	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
2693 						    HNS3_RXD_GRO_SIZE_M,
2694 						    HNS3_RXD_GRO_SIZE_S);
2695 
	return hns3_gro_complete(skb);
2697 }
2698 
2699 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
2700 				     struct sk_buff *skb, u32 rss_hash)
2701 {
2702 	struct hnae3_handle *handle = ring->tqp->handle;
2703 	enum pkt_hash_types rss_type;
2704 
2705 	if (rss_hash)
2706 		rss_type = handle->kinfo.rss_type;
2707 	else
2708 		rss_type = PKT_HASH_TYPE_NONE;
2709 
2710 	skb_set_hash(skb, rss_hash, rss_type);
2711 }
2712 
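/* Post-process a fully received packet using the info in its last BD:
 * VLAN tag, validity and error checks, GRO/checksum handling, statistics
 * and RSS hash.
 */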
2713 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
2714 {
2715 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2716 	enum hns3_pkt_l2t_type l2_frame_type;
2717 	u32 bd_base_info, l234info;
2718 	struct hns3_desc *desc;
2719 	unsigned int len;
2720 	int pre_ntc, ret;
2721 
	/* The BD info handled below is only valid in the last BD of the
	 * current packet, and ring->next_to_clean already points to the first
	 * descriptor of the next packet, so step back by one below.
	 */
2726 	pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
2727 					(ring->desc_num - 1);
2728 	desc = &ring->desc[pre_ntc];
2729 	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2730 	l234info = le32_to_cpu(desc->rx.l234_info);
2731 
	/* Based on the HW strategy, the offloaded tag is stored in
	 * ot_vlan_tag for the double-tag case and in vlan_tag for the
	 * single-tag case.
	 */
2736 	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2737 		u16 vlan_tag;
2738 
2739 		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2740 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2741 					       vlan_tag);
2742 	}
2743 
2744 	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
2745 		u64_stats_update_begin(&ring->syncp);
2746 		ring->stats.non_vld_descs++;
2747 		u64_stats_update_end(&ring->syncp);
2748 
2749 		return -EINVAL;
2750 	}
2751 
2752 	if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
2753 				  BIT(HNS3_RXD_L2E_B))))) {
2754 		u64_stats_update_begin(&ring->syncp);
2755 		if (l234info & BIT(HNS3_RXD_L2E_B))
2756 			ring->stats.l2_err++;
2757 		else
2758 			ring->stats.err_pkt_len++;
2759 		u64_stats_update_end(&ring->syncp);
2760 
2761 		return -EFAULT;
2762 	}
2763 
2764 	len = skb->len;
2765 
	/* Set the protocol before handing the skb to the IP stack */
2767 	skb->protocol = eth_type_trans(skb, netdev);
2768 
2769 	/* This is needed in order to enable forwarding support */
2770 	ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info);
2771 	if (unlikely(ret)) {
2772 		u64_stats_update_begin(&ring->syncp);
2773 		ring->stats.rx_err_cnt++;
2774 		u64_stats_update_end(&ring->syncp);
2775 		return ret;
2776 	}
2777 
2778 	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
2779 					HNS3_RXD_DMAC_S);
2780 
2781 	u64_stats_update_begin(&ring->syncp);
2782 	ring->stats.rx_pkts++;
2783 	ring->stats.rx_bytes += len;
2784 
2785 	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
2786 		ring->stats.rx_multicast++;
2787 
2788 	u64_stats_update_end(&ring->syncp);
2789 
2790 	ring->tqp_vector->rx_group.total_bytes += len;
2791 
2792 	hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
2793 	return 0;
2794 }
2795 
2796 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2797 			     struct sk_buff **out_skb)
2798 {
2799 	struct sk_buff *skb = ring->skb;
2800 	struct hns3_desc_cb *desc_cb;
2801 	struct hns3_desc *desc;
2802 	u32 bd_base_info;
2803 	int length;
2804 	int ret;
2805 
2806 	desc = &ring->desc[ring->next_to_clean];
2807 	desc_cb = &ring->desc_cb[ring->next_to_clean];
2808 
2809 	prefetch(desc);
2810 
2811 	length = le16_to_cpu(desc->rx.size);
2812 	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2813 
2814 	/* Check valid BD */
2815 	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2816 		return -ENXIO;
2817 
2818 	if (!skb)
2819 		ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2820 
	/* Prefetch the first cache line of the first page.
	 * The idea is to cache a few bytes of the packet header. With a 64B
	 * L1 cache line we need to prefetch twice to cover 128B; on CPUs with
	 * 128B L1 cache lines a single prefetch suffices to cache the
	 * relevant part of the header.
	 */
2828 	prefetch(ring->va);
2829 #if L1_CACHE_BYTES < 128
2830 	prefetch(ring->va + L1_CACHE_BYTES);
2831 #endif
2832 
2833 	if (!skb) {
2834 		ret = hns3_alloc_skb(ring, length, ring->va);
2835 		*out_skb = skb = ring->skb;
2836 
2837 		if (ret < 0) /* alloc buffer fail */
2838 			return ret;
2839 		if (ret > 0) { /* need add frag */
2840 			ret = hns3_add_frag(ring, desc, &skb, false);
2841 			if (ret)
2842 				return ret;
2843 
			/* As the head data may be changed when GRO is
			 * enabled, copy it in after the rest of the packet
			 * has been received.
			 */
2847 			memcpy(skb->data, ring->va,
2848 			       ALIGN(ring->pull_len, sizeof(long)));
2849 		}
2850 	} else {
2851 		ret = hns3_add_frag(ring, desc, &skb, true);
2852 		if (ret)
2853 			return ret;
2854 
		/* As the head data may be changed when GRO is enabled, copy
		 * it in after the rest of the packet has been received.
		 */
2858 		memcpy(skb->data, ring->va,
2859 		       ALIGN(ring->pull_len, sizeof(long)));
2860 	}
2861 
2862 	ret = hns3_handle_bdinfo(ring, skb);
2863 	if (unlikely(ret)) {
2864 		dev_kfree_skb_any(skb);
2865 		return ret;
2866 	}
2867 
2868 	*out_skb = skb;
2869 
2870 	return 0;
2871 }
2872 
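/* RX poll loop: receive up to @budget packets, refilling RX buffers in
 * batches of RCB_NOF_ALLOC_RX_BUFF_ONCE. Returns the number of packets
 * handed to the stack.
 */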
2873 int hns3_clean_rx_ring(
2874 		struct hns3_enet_ring *ring, int budget,
2875 		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2876 {
2877 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2878 	int recv_pkts, recv_bds, clean_count, err;
2879 	int unused_count = hns3_desc_unused(ring);
2880 	struct sk_buff *skb = ring->skb;
2881 	int num;
2882 
2883 	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num is read before any other ring data is touched */
2885 
2886 	recv_pkts = 0, recv_bds = 0, clean_count = 0;
2887 	num -= unused_count;
2888 	unused_count -= ring->pending_buf;
2889 
2890 	while (recv_pkts < budget && recv_bds < num) {
2891 		/* Reuse or realloc buffers */
2892 		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2893 			hns3_nic_alloc_rx_buffers(ring,
2894 						  clean_count + unused_count);
2895 			clean_count = 0;
2896 			unused_count = hns3_desc_unused(ring) -
2897 					ring->pending_buf;
2898 		}
2899 
2900 		/* Poll one pkt */
2901 		err = hns3_handle_rx_bd(ring, &skb);
2902 		if (unlikely(!skb)) /* This fault cannot be repaired */
2903 			goto out;
2904 
		if (err == -ENXIO) { /* the packet's FE (frame end) BD has not arrived yet */
			goto out;
		} else if (unlikely(err)) {  /* skip this erroneous packet */
2908 			recv_bds += ring->pending_buf;
2909 			clean_count += ring->pending_buf;
2910 			ring->skb = NULL;
2911 			ring->pending_buf = 0;
2912 			continue;
2913 		}
2914 
2915 		rx_fn(ring, skb);
2916 		recv_bds += ring->pending_buf;
2917 		clean_count += ring->pending_buf;
2918 		ring->skb = NULL;
2919 		ring->pending_buf = 0;
2920 
2921 		recv_pkts++;
2922 	}
2923 
2924 out:
	/* Make sure all data has been written before the doorbell */
2926 	if (clean_count + unused_count > 0)
2927 		hns3_nic_alloc_rx_buffers(ring,
2928 					  clean_count + unused_count);
2929 
2930 	return recv_pkts;
2931 }
2932 
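/* Adapt the interrupt coalescing GL value of a ring group to the measured
 * byte and packet rates. Returns true if int_gl changed.
 */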
2933 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2934 {
2935 	struct hns3_enet_tqp_vector *tqp_vector =
2936 					ring_group->ring->tqp_vector;
2937 	enum hns3_flow_level_range new_flow_level;
2938 	int packets_per_msecs;
2939 	int bytes_per_msecs;
2940 	u32 time_passed_ms;
2941 	u16 new_int_gl;
2942 
2943 	if (!tqp_vector->last_jiffies)
2944 		return false;
2945 
2946 	if (ring_group->total_packets == 0) {
2947 		ring_group->coal.int_gl = HNS3_INT_GL_50K;
2948 		ring_group->coal.flow_level = HNS3_FLOW_LOW;
2949 		return true;
2950 	}
2951 
	/* Simple throttle rate management
2953 	 * 0-10MB/s   lower     (50000 ints/s)
2954 	 * 10-20MB/s   middle    (20000 ints/s)
2955 	 * 20-1249MB/s high      (18000 ints/s)
2956 	 * > 40000pps  ultra     (8000 ints/s)
2957 	 */
2958 	new_flow_level = ring_group->coal.flow_level;
2959 	new_int_gl = ring_group->coal.int_gl;
2960 	time_passed_ms =
2961 		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2962 
2963 	if (!time_passed_ms)
2964 		return false;
2965 
2966 	do_div(ring_group->total_packets, time_passed_ms);
2967 	packets_per_msecs = ring_group->total_packets;
2968 
2969 	do_div(ring_group->total_bytes, time_passed_ms);
2970 	bytes_per_msecs = ring_group->total_bytes;
2971 
2972 #define HNS3_RX_LOW_BYTE_RATE 10000
2973 #define HNS3_RX_MID_BYTE_RATE 20000
2974 
2975 	switch (new_flow_level) {
2976 	case HNS3_FLOW_LOW:
2977 		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2978 			new_flow_level = HNS3_FLOW_MID;
2979 		break;
2980 	case HNS3_FLOW_MID:
2981 		if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2982 			new_flow_level = HNS3_FLOW_HIGH;
2983 		else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2984 			new_flow_level = HNS3_FLOW_LOW;
2985 		break;
2986 	case HNS3_FLOW_HIGH:
2987 	case HNS3_FLOW_ULTRA:
2988 	default:
2989 		if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2990 			new_flow_level = HNS3_FLOW_MID;
2991 		break;
2992 	}
2993 
2994 #define HNS3_RX_ULTRA_PACKET_RATE 40
2995 
2996 	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2997 	    &tqp_vector->rx_group == ring_group)
2998 		new_flow_level = HNS3_FLOW_ULTRA;
2999 
3000 	switch (new_flow_level) {
3001 	case HNS3_FLOW_LOW:
3002 		new_int_gl = HNS3_INT_GL_50K;
3003 		break;
3004 	case HNS3_FLOW_MID:
3005 		new_int_gl = HNS3_INT_GL_20K;
3006 		break;
3007 	case HNS3_FLOW_HIGH:
3008 		new_int_gl = HNS3_INT_GL_18K;
3009 		break;
3010 	case HNS3_FLOW_ULTRA:
3011 		new_int_gl = HNS3_INT_GL_8K;
3012 		break;
3013 	default:
3014 		break;
3015 	}
3016 
3017 	ring_group->total_bytes = 0;
3018 	ring_group->total_packets = 0;
3019 	ring_group->coal.flow_level = new_flow_level;
3020 	if (new_int_gl != ring_group->coal.int_gl) {
3021 		ring_group->coal.int_gl = new_int_gl;
3022 		return true;
3023 	}
3024 	return false;
3025 }
3026 
3027 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
3028 {
3029 	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
3030 	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
3031 	bool rx_update, tx_update;
3032 
3033 	/* update param every 1000ms */
3034 	if (time_before(jiffies,
3035 			tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
3036 		return;
3037 
3038 	if (rx_group->coal.gl_adapt_enable) {
3039 		rx_update = hns3_get_new_int_gl(rx_group);
3040 		if (rx_update)
3041 			hns3_set_vector_coalesce_rx_gl(tqp_vector,
3042 						       rx_group->coal.int_gl);
3043 	}
3044 
3045 	if (tx_group->coal.gl_adapt_enable) {
3046 		tx_update = hns3_get_new_int_gl(tx_group);
3047 		if (tx_update)
3048 			hns3_set_vector_coalesce_tx_gl(tqp_vector,
3049 						       tx_group->coal.int_gl);
3050 	}
3051 
3052 	tqp_vector->last_jiffies = jiffies;
3053 }
3054 
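/* NAPI poll handler shared by TX and RX: clean all TX rings of the vector
 * first, then poll each RX ring with its share of the budget.
 */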
3055 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
3056 {
3057 	struct hns3_nic_priv *priv = netdev_priv(napi->dev);
3058 	struct hns3_enet_ring *ring;
3059 	int rx_pkt_total = 0;
3060 
3061 	struct hns3_enet_tqp_vector *tqp_vector =
3062 		container_of(napi, struct hns3_enet_tqp_vector, napi);
3063 	bool clean_complete = true;
3064 	int rx_budget = budget;
3065 
3066 	if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3067 		napi_complete(napi);
3068 		return 0;
3069 	}
3070 
3071 	/* Since the actual Tx work is minimal, we can give the Tx a larger
3072 	 * budget and be more aggressive about cleaning up the Tx descriptors.
3073 	 */
3074 	hns3_for_each_ring(ring, tqp_vector->tx_group)
3075 		hns3_clean_tx_ring(ring);
3076 
	/* make sure the rx ring budget is not smaller than 1 */
3078 	if (tqp_vector->num_tqps > 1)
3079 		rx_budget = max(budget / tqp_vector->num_tqps, 1);
3080 
3081 	hns3_for_each_ring(ring, tqp_vector->rx_group) {
3082 		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
3083 						    hns3_rx_skb);
3084 
3085 		if (rx_cleaned >= rx_budget)
3086 			clean_complete = false;
3087 
3088 		rx_pkt_total += rx_cleaned;
3089 	}
3090 
3091 	tqp_vector->rx_group.total_packets += rx_pkt_total;
3092 
3093 	if (!clean_complete)
3094 		return budget;
3095 
3096 	if (napi_complete(napi) &&
3097 	    likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3098 		hns3_update_new_int_gl(tqp_vector);
3099 		hns3_mask_vector_irq(tqp_vector, 1);
3100 	}
3101 
3102 	return rx_pkt_total;
3103 }
3104 
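/* Build the ring chain list describing all TX and RX rings attached to this
 * vector, used when mapping or unmapping rings to the vector.
 */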
3105 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3106 				      struct hnae3_ring_chain_node *head)
3107 {
3108 	struct pci_dev *pdev = tqp_vector->handle->pdev;
3109 	struct hnae3_ring_chain_node *cur_chain = head;
3110 	struct hnae3_ring_chain_node *chain;
3111 	struct hns3_enet_ring *tx_ring;
3112 	struct hns3_enet_ring *rx_ring;
3113 
3114 	tx_ring = tqp_vector->tx_group.ring;
3115 	if (tx_ring) {
3116 		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
3117 		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3118 			      HNAE3_RING_TYPE_TX);
3119 		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3120 				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
3121 
3122 		cur_chain->next = NULL;
3123 
3124 		while (tx_ring->next) {
3125 			tx_ring = tx_ring->next;
3126 
3127 			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
3128 					     GFP_KERNEL);
3129 			if (!chain)
3130 				goto err_free_chain;
3131 
3132 			cur_chain->next = chain;
3133 			chain->tqp_index = tx_ring->tqp->tqp_index;
3134 			hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3135 				      HNAE3_RING_TYPE_TX);
3136 			hnae3_set_field(chain->int_gl_idx,
3137 					HNAE3_RING_GL_IDX_M,
3138 					HNAE3_RING_GL_IDX_S,
3139 					HNAE3_RING_GL_TX);
3140 
3141 			cur_chain = chain;
3142 		}
3143 	}
3144 
3145 	rx_ring = tqp_vector->rx_group.ring;
3146 	if (!tx_ring && rx_ring) {
3147 		cur_chain->next = NULL;
3148 		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
3149 		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3150 			      HNAE3_RING_TYPE_RX);
3151 		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3152 				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3153 
3154 		rx_ring = rx_ring->next;
3155 	}
3156 
3157 	while (rx_ring) {
3158 		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
3159 		if (!chain)
3160 			goto err_free_chain;
3161 
3162 		cur_chain->next = chain;
3163 		chain->tqp_index = rx_ring->tqp->tqp_index;
3164 		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3165 			      HNAE3_RING_TYPE_RX);
3166 		hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3167 				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3168 
3169 		cur_chain = chain;
3170 
3171 		rx_ring = rx_ring->next;
3172 	}
3173 
3174 	return 0;
3175 
3176 err_free_chain:
3177 	cur_chain = head->next;
3178 	while (cur_chain) {
3179 		chain = cur_chain->next;
3180 		devm_kfree(&pdev->dev, cur_chain);
3181 		cur_chain = chain;
3182 	}
3183 	head->next = NULL;
3184 
3185 	return -ENOMEM;
3186 }
3187 
3188 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3189 					struct hnae3_ring_chain_node *head)
3190 {
3191 	struct pci_dev *pdev = tqp_vector->handle->pdev;
3192 	struct hnae3_ring_chain_node *chain_tmp, *chain;
3193 
3194 	chain = head->next;
3195 
3196 	while (chain) {
3197 		chain_tmp = chain->next;
3198 		devm_kfree(&pdev->dev, chain);
3199 		chain = chain_tmp;
3200 	}
3201 }
3202 
3203 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
3204 				   struct hns3_enet_ring *ring)
3205 {
3206 	ring->next = group->ring;
3207 	group->ring = ring;
3208 
3209 	group->count++;
3210 }
3211 
3212 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
3213 {
3214 	struct pci_dev *pdev = priv->ae_handle->pdev;
3215 	struct hns3_enet_tqp_vector *tqp_vector;
3216 	int num_vectors = priv->vector_num;
3217 	int numa_node;
3218 	int vector_i;
3219 
3220 	numa_node = dev_to_node(&pdev->dev);
3221 
3222 	for (vector_i = 0; vector_i < num_vectors; vector_i++) {
3223 		tqp_vector = &priv->tqp_vector[vector_i];
3224 		cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
3225 				&tqp_vector->affinity_mask);
3226 	}
3227 }
3228 
3229 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
3230 {
3231 	struct hnae3_ring_chain_node vector_ring_chain;
3232 	struct hnae3_handle *h = priv->ae_handle;
3233 	struct hns3_enet_tqp_vector *tqp_vector;
3234 	int ret = 0;
3235 	int i;
3236 
3237 	hns3_nic_set_cpumask(priv);
3238 
3239 	for (i = 0; i < priv->vector_num; i++) {
3240 		tqp_vector = &priv->tqp_vector[i];
3241 		hns3_vector_gl_rl_init_hw(tqp_vector, priv);
3242 		tqp_vector->num_tqps = 0;
3243 	}
3244 
3245 	for (i = 0; i < h->kinfo.num_tqps; i++) {
3246 		u16 vector_i = i % priv->vector_num;
3247 		u16 tqp_num = h->kinfo.num_tqps;
3248 
3249 		tqp_vector = &priv->tqp_vector[vector_i];
3250 
3251 		hns3_add_ring_to_group(&tqp_vector->tx_group,
3252 				       priv->ring_data[i].ring);
3253 
3254 		hns3_add_ring_to_group(&tqp_vector->rx_group,
3255 				       priv->ring_data[i + tqp_num].ring);
3256 
3257 		priv->ring_data[i].ring->tqp_vector = tqp_vector;
3258 		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
3259 		tqp_vector->num_tqps++;
3260 	}
3261 
3262 	for (i = 0; i < priv->vector_num; i++) {
3263 		tqp_vector = &priv->tqp_vector[i];
3264 
3265 		tqp_vector->rx_group.total_bytes = 0;
3266 		tqp_vector->rx_group.total_packets = 0;
3267 		tqp_vector->tx_group.total_bytes = 0;
3268 		tqp_vector->tx_group.total_packets = 0;
3269 		tqp_vector->handle = h;
3270 
3271 		ret = hns3_get_vector_ring_chain(tqp_vector,
3272 						 &vector_ring_chain);
3273 		if (ret)
3274 			goto map_ring_fail;
3275 
3276 		ret = h->ae_algo->ops->map_ring_to_vector(h,
3277 			tqp_vector->vector_irq, &vector_ring_chain);
3278 
3279 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3280 
3281 		if (ret)
3282 			goto map_ring_fail;
3283 
3284 		netif_napi_add(priv->netdev, &tqp_vector->napi,
3285 			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
3286 	}
3287 
3288 	return 0;
3289 
3290 map_ring_fail:
3291 	while (i--)
3292 		netif_napi_del(&priv->tqp_vector[i].napi);
3293 
3294 	return ret;
3295 }
3296 
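/* Allocate the TQP vectors: request up to min(online CPUs, TQP number,
 * HNS3_VECTOR_PF_MAX_NUM) vectors from the AE layer and initialize their
 * coalescing defaults.
 */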
3297 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
3298 {
3299 #define HNS3_VECTOR_PF_MAX_NUM		64
3300 
3301 	struct hnae3_handle *h = priv->ae_handle;
3302 	struct hns3_enet_tqp_vector *tqp_vector;
3303 	struct hnae3_vector_info *vector;
3304 	struct pci_dev *pdev = h->pdev;
3305 	u16 tqp_num = h->kinfo.num_tqps;
3306 	u16 vector_num;
3307 	int ret = 0;
3308 	u16 i;
3309 
	/* The RSS size, the number of online CPUs and vector_num should
	 * match; 2P/4P systems should be considered later.
	 */
3312 	vector_num = min_t(u16, num_online_cpus(), tqp_num);
3313 	vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
3314 
3315 	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
3316 			      GFP_KERNEL);
3317 	if (!vector)
3318 		return -ENOMEM;
3319 
3320 	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
3321 
3322 	priv->vector_num = vector_num;
3323 	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
3324 		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
3325 			     GFP_KERNEL);
3326 	if (!priv->tqp_vector) {
3327 		ret = -ENOMEM;
3328 		goto out;
3329 	}
3330 
3331 	for (i = 0; i < priv->vector_num; i++) {
3332 		tqp_vector = &priv->tqp_vector[i];
3333 		tqp_vector->idx = i;
3334 		tqp_vector->mask_addr = vector[i].io_addr;
3335 		tqp_vector->vector_irq = vector[i].vector;
3336 		hns3_vector_gl_rl_init(tqp_vector, priv);
3337 	}
3338 
3339 out:
3340 	devm_kfree(&pdev->dev, vector);
3341 	return ret;
3342 }
3343 
3344 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
3345 {
3346 	group->ring = NULL;
3347 	group->count = 0;
3348 }
3349 
3350 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
3351 {
3352 	struct hnae3_ring_chain_node vector_ring_chain;
3353 	struct hnae3_handle *h = priv->ae_handle;
3354 	struct hns3_enet_tqp_vector *tqp_vector;
3355 	int i;
3356 
3357 	for (i = 0; i < priv->vector_num; i++) {
3358 		tqp_vector = &priv->tqp_vector[i];
3359 
3360 		if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
3361 			continue;
3362 
3363 		hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
3364 
3365 		h->ae_algo->ops->unmap_ring_from_vector(h,
3366 			tqp_vector->vector_irq, &vector_ring_chain);
3367 
3368 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3369 
3370 		if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
3371 			irq_set_affinity_notifier(tqp_vector->vector_irq,
3372 						  NULL);
3373 			irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
3374 			free_irq(tqp_vector->vector_irq, tqp_vector);
3375 			tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
3376 		}
3377 
3378 		hns3_clear_ring_group(&tqp_vector->rx_group);
3379 		hns3_clear_ring_group(&tqp_vector->tx_group);
3380 		netif_napi_del(&priv->tqp_vector[i].napi);
3381 	}
3382 }
3383 
3384 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
3385 {
3386 	struct hnae3_handle *h = priv->ae_handle;
3387 	struct pci_dev *pdev = h->pdev;
3388 	int i, ret;
3389 
3390 	for (i = 0; i < priv->vector_num; i++) {
3391 		struct hns3_enet_tqp_vector *tqp_vector;
3392 
3393 		tqp_vector = &priv->tqp_vector[i];
3394 		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
3395 		if (ret)
3396 			return ret;
3397 	}
3398 
3399 	devm_kfree(&pdev->dev, priv->tqp_vector);
3400 	return 0;
3401 }
3402 
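/* Allocate and initialize one TX or RX ring for a queue, including its
 * register base and descriptor count.
 */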
3403 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
3404 			     int ring_type)
3405 {
3406 	struct hns3_nic_ring_data *ring_data = priv->ring_data;
3407 	int queue_num = priv->ae_handle->kinfo.num_tqps;
3408 	struct pci_dev *pdev = priv->ae_handle->pdev;
3409 	struct hns3_enet_ring *ring;
3410 	int desc_num;
3411 
3412 	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
3413 	if (!ring)
3414 		return -ENOMEM;
3415 
3416 	if (ring_type == HNAE3_RING_TYPE_TX) {
3417 		desc_num = priv->ae_handle->kinfo.num_tx_desc;
3418 		ring_data[q->tqp_index].ring = ring;
3419 		ring_data[q->tqp_index].queue_index = q->tqp_index;
3420 		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
3421 	} else {
3422 		desc_num = priv->ae_handle->kinfo.num_rx_desc;
3423 		ring_data[q->tqp_index + queue_num].ring = ring;
3424 		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
3425 		ring->io_base = q->io_base;
3426 	}
3427 
3428 	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
3429 
3430 	ring->tqp = q;
3431 	ring->desc = NULL;
3432 	ring->desc_cb = NULL;
3433 	ring->dev = priv->dev;
3434 	ring->desc_dma_addr = 0;
3435 	ring->buf_size = q->buf_size;
3436 	ring->desc_num = desc_num;
3437 	ring->next_to_use = 0;
3438 	ring->next_to_clean = 0;
3439 
3440 	return 0;
3441 }
3442 
3443 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
3444 			      struct hns3_nic_priv *priv)
3445 {
3446 	int ret;
3447 
3448 	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
3449 	if (ret)
3450 		return ret;
3451 
3452 	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
3453 	if (ret) {
3454 		devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
3455 		return ret;
3456 	}
3457 
3458 	return 0;
3459 }
3460 
3461 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
3462 {
3463 	struct hnae3_handle *h = priv->ae_handle;
3464 	struct pci_dev *pdev = h->pdev;
3465 	int i, ret;
3466 
3467 	priv->ring_data =  devm_kzalloc(&pdev->dev,
3468 					array3_size(h->kinfo.num_tqps,
3469 						    sizeof(*priv->ring_data),
3470 						    2),
3471 					GFP_KERNEL);
3472 	if (!priv->ring_data)
3473 		return -ENOMEM;
3474 
3475 	for (i = 0; i < h->kinfo.num_tqps; i++) {
3476 		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
3477 		if (ret)
3478 			goto err;
3479 	}
3480 
3481 	return 0;
3482 err:
3483 	while (i--) {
3484 		devm_kfree(priv->dev, priv->ring_data[i].ring);
3485 		devm_kfree(priv->dev,
3486 			   priv->ring_data[i + h->kinfo.num_tqps].ring);
3487 	}
3488 
3489 	devm_kfree(&pdev->dev, priv->ring_data);
3490 	priv->ring_data = NULL;
3491 	return ret;
3492 }
3493 
3494 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
3495 {
3496 	struct hnae3_handle *h = priv->ae_handle;
3497 	int i;
3498 
3499 	if (!priv->ring_data)
3500 		return;
3501 
3502 	for (i = 0; i < h->kinfo.num_tqps; i++) {
3503 		devm_kfree(priv->dev, priv->ring_data[i].ring);
3504 		devm_kfree(priv->dev,
3505 			   priv->ring_data[i + h->kinfo.num_tqps].ring);
3506 	}
3507 	devm_kfree(priv->dev, priv->ring_data);
3508 	priv->ring_data = NULL;
3509 }
3510 
3511 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
3512 {
3513 	int ret;
3514 
3515 	if (ring->desc_num <= 0 || ring->buf_size <= 0)
3516 		return -EINVAL;
3517 
3518 	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
3519 				GFP_KERNEL);
3520 	if (!ring->desc_cb) {
3521 		ret = -ENOMEM;
3522 		goto out;
3523 	}
3524 
3525 	ret = hns3_alloc_desc(ring);
3526 	if (ret)
3527 		goto out_with_desc_cb;
3528 
3529 	if (!HNAE3_IS_TX_RING(ring)) {
3530 		ret = hns3_alloc_ring_buffers(ring);
3531 		if (ret)
3532 			goto out_with_desc;
3533 	}
3534 
3535 	return 0;
3536 
3537 out_with_desc:
3538 	hns3_free_desc(ring);
3539 out_with_desc_cb:
3540 	kfree(ring->desc_cb);
3541 	ring->desc_cb = NULL;
3542 out:
3543 	return ret;
3544 }
3545 
3546 static void hns3_fini_ring(struct hns3_enet_ring *ring)
3547 {
3548 	hns3_free_desc(ring);
3549 	kfree(ring->desc_cb);
3550 	ring->desc_cb = NULL;
3551 	ring->next_to_clean = 0;
3552 	ring->next_to_use = 0;
3553 	ring->pending_buf = 0;
3554 	if (ring->skb) {
3555 		dev_kfree_skb_any(ring->skb);
3556 		ring->skb = NULL;
3557 	}
3558 }
3559 
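/* Translate an RX buffer size in bytes into the BD size type expected by
 * hardware; unknown sizes fall back to the 2048-byte type.
 */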
3560 static int hns3_buf_size2type(u32 buf_size)
3561 {
3562 	int bd_size_type;
3563 
3564 	switch (buf_size) {
3565 	case 512:
3566 		bd_size_type = HNS3_BD_SIZE_512_TYPE;
3567 		break;
3568 	case 1024:
3569 		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
3570 		break;
3571 	case 2048:
3572 		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3573 		break;
3574 	case 4096:
3575 		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
3576 		break;
3577 	default:
3578 		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3579 	}
3580 
3581 	return bd_size_type;
3582 }
3583 
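/* Program the ring's descriptor base address, buffer size (RX only) and
 * BD number into the hardware registers.
 */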
3584 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3585 {
3586 	dma_addr_t dma = ring->desc_dma_addr;
3587 	struct hnae3_queue *q = ring->tqp;
3588 
3589 	if (!HNAE3_IS_TX_RING(ring)) {
3590 		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
3591 			       (u32)dma);
3592 		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
3593 			       (u32)((dma >> 31) >> 1));
3594 
3595 		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
3596 			       hns3_buf_size2type(ring->buf_size));
3597 		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
3598 			       ring->desc_num / 8 - 1);
3599 
3600 	} else {
3601 		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
3602 			       (u32)dma);
3603 		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3604 			       (u32)((dma >> 31) >> 1));
3605 
3606 		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3607 			       ring->desc_num / 8 - 1);
3608 	}
3609 }
3610 
3611 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3612 {
3613 	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3614 	int i;
3615 
3616 	for (i = 0; i < HNAE3_MAX_TC; i++) {
3617 		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3618 		int j;
3619 
3620 		if (!tc_info->enable)
3621 			continue;
3622 
3623 		for (j = 0; j < tc_info->tqp_count; j++) {
3624 			struct hnae3_queue *q;
3625 
3626 			q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3627 			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3628 				       tc_info->tc);
3629 		}
3630 	}
3631 }
3632 
3633 int hns3_init_all_ring(struct hns3_nic_priv *priv)
3634 {
3635 	struct hnae3_handle *h = priv->ae_handle;
3636 	int ring_num = h->kinfo.num_tqps * 2;
3637 	int i, j;
3638 	int ret;
3639 
3640 	for (i = 0; i < ring_num; i++) {
3641 		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3642 		if (ret) {
3643 			dev_err(priv->dev,
3644 				"Alloc ring memory fail! ret=%d\n", ret);
3645 			goto out_when_alloc_ring_memory;
3646 		}
3647 
3648 		u64_stats_init(&priv->ring_data[i].ring->syncp);
3649 	}
3650 
3651 	return 0;
3652 
3653 out_when_alloc_ring_memory:
3654 	for (j = i - 1; j >= 0; j--)
3655 		hns3_fini_ring(priv->ring_data[j].ring);
3656 
3657 	return -ENOMEM;
3658 }
3659 
3660 int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3661 {
3662 	struct hnae3_handle *h = priv->ae_handle;
3663 	int i;
3664 
3665 	for (i = 0; i < h->kinfo.num_tqps; i++) {
3666 		hns3_fini_ring(priv->ring_data[i].ring);
3667 		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3668 	}
3669 	return 0;
3670 }
3671 
/* Set the MAC address if it is configured, otherwise leave it to the AE driver */
3673 static int hns3_init_mac_addr(struct net_device *netdev, bool init)
3674 {
3675 	struct hns3_nic_priv *priv = netdev_priv(netdev);
3676 	struct hnae3_handle *h = priv->ae_handle;
3677 	u8 mac_addr_temp[ETH_ALEN];
3678 	int ret = 0;
3679 
3680 	if (h->ae_algo->ops->get_mac_addr && init) {
3681 		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3682 		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3683 	}
3684 
3685 	/* Check if the MAC address is valid, if not get a random one */
3686 	if (!is_valid_ether_addr(netdev->dev_addr)) {
3687 		eth_hw_addr_random(netdev);
3688 		dev_warn(priv->dev, "using random MAC address %pM\n",
3689 			 netdev->dev_addr);
3690 	}
3691 
3692 	if (h->ae_algo->ops->set_mac_addr)
3693 		ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3694 
3695 	return ret;
3696 }
3697 
3698 static int hns3_init_phy(struct net_device *netdev)
3699 {
3700 	struct hnae3_handle *h = hns3_get_handle(netdev);
3701 	int ret = 0;
3702 
3703 	if (h->ae_algo->ops->mac_connect_phy)
3704 		ret = h->ae_algo->ops->mac_connect_phy(h);
3705 
3706 	return ret;
3707 }
3708 
3709 static void hns3_uninit_phy(struct net_device *netdev)
3710 {
3711 	struct hnae3_handle *h = hns3_get_handle(netdev);
3712 
3713 	if (h->ae_algo->ops->mac_disconnect_phy)
3714 		h->ae_algo->ops->mac_disconnect_phy(h);
3715 }
3716 
3717 static int hns3_restore_fd_rules(struct net_device *netdev)
3718 {
3719 	struct hnae3_handle *h = hns3_get_handle(netdev);
3720 	int ret = 0;
3721 
3722 	if (h->ae_algo->ops->restore_fd_rules)
3723 		ret = h->ae_algo->ops->restore_fd_rules(h);
3724 
3725 	return ret;
3726 }
3727 
3728 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
3729 {
3730 	struct hnae3_handle *h = hns3_get_handle(netdev);
3731 
3732 	if (h->ae_algo->ops->del_all_fd_entries)
3733 		h->ae_algo->ops->del_all_fd_entries(h, clear_list);
3734 }
3735 
3736 static void hns3_nic_set_priv_ops(struct net_device *netdev)
3737 {
3738 	struct hns3_nic_priv *priv = netdev_priv(netdev);
3739 
3740 	if ((netdev->features & NETIF_F_TSO) ||
3741 	    (netdev->features & NETIF_F_TSO6))
3742 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
3743 	else
3744 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
3745 }
3746 
3747 static int hns3_client_start(struct hnae3_handle *handle)
3748 {
3749 	if (!handle->ae_algo->ops->client_start)
3750 		return 0;
3751 
3752 	return handle->ae_algo->ops->client_start(handle);
3753 }
3754 
3755 static void hns3_client_stop(struct hnae3_handle *handle)
3756 {
3757 	if (!handle->ae_algo->ops->client_stop)
3758 		return;
3759 
3760 	handle->ae_algo->ops->client_stop(handle);
3761 }
3762 
3763 static void hns3_info_show(struct hns3_nic_priv *priv)
3764 {
3765 	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3766 
3767 	dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
3768 	dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
3769 	dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
3770 	dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
3771 	dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
3772 	dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
3773 	dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
3774 	dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
3775 	dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
3776 }
3777 
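/* Called by the hnae3 framework when a KNIC instance is created: allocate
 * the netdev, set up rings, vectors and PHY, then register the netdev.
 */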
3778 static int hns3_client_init(struct hnae3_handle *handle)
3779 {
3780 	struct pci_dev *pdev = handle->pdev;
3781 	u16 alloc_tqps, max_rss_size;
3782 	struct hns3_nic_priv *priv;
3783 	struct net_device *netdev;
3784 	int ret;
3785 
3786 	handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3787 						    &max_rss_size);
3788 	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
3789 	if (!netdev)
3790 		return -ENOMEM;
3791 
3792 	priv = netdev_priv(netdev);
3793 	priv->dev = &pdev->dev;
3794 	priv->netdev = netdev;
3795 	priv->ae_handle = handle;
3796 	priv->tx_timeout_count = 0;
3797 	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
3798 
3799 	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
3800 
3801 	handle->kinfo.netdev = netdev;
3802 	handle->priv = (void *)priv;
3803 
3804 	hns3_init_mac_addr(netdev, true);
3805 
3806 	hns3_set_default_feature(netdev);
3807 
3808 	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3809 	netdev->priv_flags |= IFF_UNICAST_FLT;
3810 	netdev->netdev_ops = &hns3_nic_netdev_ops;
3811 	SET_NETDEV_DEV(netdev, &pdev->dev);
3812 	hns3_ethtool_set_ops(netdev);
3813 	hns3_nic_set_priv_ops(netdev);
3814 
3815 	/* Carrier off reporting is important to ethtool even BEFORE open */
3816 	netif_carrier_off(netdev);
3817 
3818 	ret = hns3_get_ring_config(priv);
3819 	if (ret) {
3820 		ret = -ENOMEM;
3821 		goto out_get_ring_cfg;
3822 	}
3823 
3824 	ret = hns3_nic_alloc_vector_data(priv);
3825 	if (ret) {
3826 		ret = -ENOMEM;
3827 		goto out_alloc_vector_data;
3828 	}
3829 
3830 	ret = hns3_nic_init_vector_data(priv);
3831 	if (ret) {
3832 		ret = -ENOMEM;
3833 		goto out_init_vector_data;
3834 	}
3835 
3836 	ret = hns3_init_all_ring(priv);
3837 	if (ret) {
3838 		ret = -ENOMEM;
3839 		goto out_init_ring_data;
3840 	}
3841 
3842 	ret = hns3_init_phy(netdev);
3843 	if (ret)
3844 		goto out_init_phy;
3845 
3846 	ret = register_netdev(netdev);
3847 	if (ret) {
3848 		dev_err(priv->dev, "probe register netdev fail!\n");
3849 		goto out_reg_netdev_fail;
3850 	}
3851 
3852 	ret = hns3_client_start(handle);
3853 	if (ret) {
3854 		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
3855 		goto out_client_start;
3856 	}
3857 
3858 	hns3_dcbnl_setup(handle);
3859 
3860 	hns3_dbg_init(handle);
3861 
3862 	/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
3863 	netdev->max_mtu = HNS3_MAX_MTU;
3864 
3865 	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
3866 
3867 	if (netif_msg_drv(handle))
3868 		hns3_info_show(priv);
3869 
3870 	return ret;
3871 
3872 out_client_start:
3873 	unregister_netdev(netdev);
3874 out_reg_netdev_fail:
3875 	hns3_uninit_phy(netdev);
3876 out_init_phy:
3877 	hns3_uninit_all_ring(priv);
3878 out_init_ring_data:
3879 	hns3_nic_uninit_vector_data(priv);
3880 out_init_vector_data:
3881 	hns3_nic_dealloc_vector_data(priv);
3882 out_alloc_vector_data:
3883 	priv->ring_data = NULL;
3884 out_get_ring_cfg:
3885 	priv->ae_handle = NULL;
3886 	free_netdev(netdev);
3887 	return ret;
3888 }
3889 
3890 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3891 {
3892 	struct net_device *netdev = handle->kinfo.netdev;
3893 	struct hns3_nic_priv *priv = netdev_priv(netdev);
3894 	int ret;
3895 
3896 	hns3_remove_hw_addr(netdev);
3897 
3898 	if (netdev->reg_state != NETREG_UNINITIALIZED)
3899 		unregister_netdev(netdev);
3900 
3901 	hns3_client_stop(handle);
3902 
3903 	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
3904 		netdev_warn(netdev, "already uninitialized\n");
3905 		goto out_netdev_free;
3906 	}
3907 
3908 	hns3_del_all_fd_rules(netdev, true);
3909 
3910 	hns3_force_clear_all_rx_ring(handle);
3911 
3912 	hns3_uninit_phy(netdev);
3913 
3914 	hns3_nic_uninit_vector_data(priv);
3915 
3916 	ret = hns3_nic_dealloc_vector_data(priv);
3917 	if (ret)
3918 		netdev_err(netdev, "dealloc vector error\n");
3919 
3920 	ret = hns3_uninit_all_ring(priv);
3921 	if (ret)
3922 		netdev_err(netdev, "uninit ring error\n");
3923 
3924 	hns3_put_ring_config(priv);
3925 
3926 	hns3_dbg_uninit(handle);
3927 
3928 out_netdev_free:
3929 	free_netdev(netdev);
3930 }
3931 
3932 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3933 {
3934 	struct net_device *netdev = handle->kinfo.netdev;
3935 
3936 	if (!netdev)
3937 		return;
3938 
3939 	if (linkup) {
3940 		netif_carrier_on(netdev);
3941 		netif_tx_wake_all_queues(netdev);
3942 		if (netif_msg_link(handle))
3943 			netdev_info(netdev, "link up\n");
3944 	} else {
3945 		netif_carrier_off(netdev);
3946 		netif_tx_stop_all_queues(netdev);
3947 		if (netif_msg_link(handle))
3948 			netdev_info(netdev, "link down\n");
3949 	}
3950 }
3951 
3952 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3953 {
3954 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3955 	struct net_device *ndev = kinfo->netdev;
3956 
3957 	if (tc > HNAE3_MAX_TC)
3958 		return -EINVAL;
3959 
3960 	if (!ndev)
3961 		return -ENODEV;
3962 
3963 	return hns3_nic_set_real_num_queue(ndev);
3964 }
3965 
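/* Re-sync the unicast and multicast address lists kept by the kernel to
 * the hardware, used in the reset restore path after the hardware tables
 * have been cleared.
 */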
3966 static int hns3_recover_hw_addr(struct net_device *ndev)
3967 {
3968 	struct netdev_hw_addr_list *list;
3969 	struct netdev_hw_addr *ha, *tmp;
3970 	int ret = 0;
3971 
3972 	netif_addr_lock_bh(ndev);
3973 	/* go through and sync uc_addr entries to the device */
3974 	list = &ndev->uc;
3975 	list_for_each_entry_safe(ha, tmp, &list->list, list) {
3976 		ret = hns3_nic_uc_sync(ndev, ha->addr);
3977 		if (ret)
3978 			goto out;
3979 	}
3980 
3981 	/* go through and sync mc_addr entries to the device */
3982 	list = &ndev->mc;
3983 	list_for_each_entry_safe(ha, tmp, &list->list, list) {
3984 		ret = hns3_nic_mc_sync(ndev, ha->addr);
3985 		if (ret)
3986 			goto out;
3987 	}
3988 
3989 out:
3990 	netif_addr_unlock_bh(ndev);
3991 	return ret;
3992 }
3993 
3994 static void hns3_remove_hw_addr(struct net_device *netdev)
3995 {
3996 	struct netdev_hw_addr_list *list;
3997 	struct netdev_hw_addr *ha, *tmp;
3998 
3999 	hns3_nic_uc_unsync(netdev, netdev->dev_addr);
4000 
4001 	netif_addr_lock_bh(netdev);
4002 	/* go through and unsync uc_addr entries from the device */
4003 	list = &netdev->uc;
4004 	list_for_each_entry_safe(ha, tmp, &list->list, list)
4005 		hns3_nic_uc_unsync(netdev, ha->addr);
4006 
4007 	/* go through and unsync mc_addr entries from the device */
4008 	list = &netdev->mc;
4009 	list_for_each_entry_safe(ha, tmp, &list->list, list)
4010 		if (ha->refcount > 1)
4011 			hns3_nic_mc_unsync(netdev, ha->addr);
4012 
4013 	netif_addr_unlock_bh(netdev);
4014 }
4015 
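/* Release every pending TX buffer between next_to_clean and next_to_use
 * without waiting for hardware to complete them.
 */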
4016 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
4017 {
4018 	while (ring->next_to_clean != ring->next_to_use) {
4019 		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
4020 		hns3_free_buffer_detach(ring, ring->next_to_clean);
4021 		ring_ptr_move_fw(ring, next_to_clean);
4022 	}
4023 }
4024 
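/* Walk the RX ring and make sure every descriptor owns a mapped buffer
 * again, replacing any buffer that was handed to the stack, and drop the
 * partially received skb if there is one.
 */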
4025 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
4026 {
4027 	struct hns3_desc_cb res_cbs;
4028 	int ret;
4029 
4030 	while (ring->next_to_use != ring->next_to_clean) {
4031 		/* When a buffer is not reused, its memory has been
4032 		 * freed in hns3_handle_rx_bd or will be freed by
4033 		 * the stack, so we need to replace the buffer here.
4034 		 */
4035 		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4036 			ret = hns3_reserve_buffer_map(ring, &res_cbs);
4037 			if (ret) {
4038 				u64_stats_update_begin(&ring->syncp);
4039 				ring->stats.sw_err_cnt++;
4040 				u64_stats_update_end(&ring->syncp);
4041 				/* If allocating a new buffer fails, exit
4042 				 * directly and clear again in the up flow.
4043 				 */
4044 				netdev_warn(ring->tqp->handle->kinfo.netdev,
4045 					    "reserve buffer map failed, ret = %d\n",
4046 					    ret);
4047 				return ret;
4048 			}
4049 			hns3_replace_buffer(ring, ring->next_to_use,
4050 					    &res_cbs);
4051 		}
4052 		ring_ptr_move_fw(ring, next_to_use);
4053 	}
4054 
4055 	/* Free the pending skb in rx ring */
4056 	if (ring->skb) {
4057 		dev_kfree_skb_any(ring->skb);
4058 		ring->skb = NULL;
4059 		ring->pending_buf = 0;
4060 	}
4061 
4062 	return 0;
4063 }
4064 
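/* Unmap RX buffers that were not reused, without allocating
 * replacements; used when the ring is being torn down.
 */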
4065 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
4066 {
4067 	while (ring->next_to_use != ring->next_to_clean) {
4068 		/* When a buffer is not reused, its memory has been
4069 		 * freed in hns3_handle_rx_bd or will be freed by
4070 		 * the stack, so we only need to unmap the buffer here.
4071 		 */
4072 		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4073 			hns3_unmap_buffer(ring,
4074 					  &ring->desc_cb[ring->next_to_use]);
4075 			ring->desc_cb[ring->next_to_use].dma = 0;
4076 		}
4077 
4078 		ring_ptr_move_fw(ring, next_to_use);
4079 	}
4080 }
4081 
4082 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
4083 {
4084 	struct net_device *ndev = h->kinfo.netdev;
4085 	struct hns3_nic_priv *priv = netdev_priv(ndev);
4086 	struct hns3_enet_ring *ring;
4087 	u32 i;
4088 
4089 	for (i = 0; i < h->kinfo.num_tqps; i++) {
4090 		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4091 		hns3_force_clear_rx_ring(ring);
4092 	}
4093 }
4094 
4095 static void hns3_clear_all_ring(struct hnae3_handle *h)
4096 {
4097 	struct net_device *ndev = h->kinfo.netdev;
4098 	struct hns3_nic_priv *priv = netdev_priv(ndev);
4099 	u32 i;
4100 
4101 	for (i = 0; i < h->kinfo.num_tqps; i++) {
4102 		struct netdev_queue *dev_queue;
4103 		struct hns3_enet_ring *ring;
4104 
4105 		ring = priv->ring_data[i].ring;
4106 		hns3_clear_tx_ring(ring);
4107 		dev_queue = netdev_get_tx_queue(ndev,
4108 						priv->ring_data[i].queue_index);
4109 		netdev_tx_reset_queue(dev_queue);
4110 
4111 		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4112 		/* Continue to clear other rings even if clearing some
4113 		 * rings failed.
4114 		 */
4115 		hns3_clear_rx_ring(ring);
4116 	}
4117 }
4118 
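/* Reset every TQP in hardware and reinitialize the TX and RX rings so
 * that all descriptors are reusable, then reprogram the TX ring TC
 * mapping.
 */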
4119 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
4120 {
4121 	struct net_device *ndev = h->kinfo.netdev;
4122 	struct hns3_nic_priv *priv = netdev_priv(ndev);
4123 	struct hns3_enet_ring *rx_ring;
4124 	int i, j;
4125 	int ret;
4126 
4127 	for (i = 0; i < h->kinfo.num_tqps; i++) {
4128 		ret = h->ae_algo->ops->reset_queue(h, i);
4129 		if (ret)
4130 			return ret;
4131 
4132 		hns3_init_ring_hw(priv->ring_data[i].ring);
4133 
4134 		/* We need to clear the tx ring here because the self test
4135 		 * uses the ring and does not run the down flow before up.
4136 		 */
4137 		hns3_clear_tx_ring(priv->ring_data[i].ring);
4138 		priv->ring_data[i].ring->next_to_clean = 0;
4139 		priv->ring_data[i].ring->next_to_use = 0;
4140 
4141 		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4142 		hns3_init_ring_hw(rx_ring);
4143 		ret = hns3_clear_rx_ring(rx_ring);
4144 		if (ret)
4145 			return ret;
4146 
4147 		/* We cannot know the hardware head and tail when this
4148 		 * function is called in the reset flow, so reuse all desc.
4149 		 */
4150 		for (j = 0; j < rx_ring->desc_num; j++)
4151 			hns3_reuse_buffer(rx_ring, j);
4152 
4153 		rx_ring->next_to_clean = 0;
4154 		rx_ring->next_to_use = 0;
4155 	}
4156 
4157 	hns3_init_tx_ring_tc(priv);
4158 
4159 	return 0;
4160 }
4161 
4162 static void hns3_store_coal(struct hns3_nic_priv *priv)
4163 {
4164 	/* ethtool only supports setting and querying one coalesce
4165 	 * configuration for now, so save vector 0's coalesce
4166 	 * configuration here in order to restore it.
4167 	 */
4168 	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
4169 	       sizeof(struct hns3_enet_coalesce));
4170 	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
4171 	       sizeof(struct hns3_enet_coalesce));
4172 }
4173 
4174 static void hns3_restore_coal(struct hns3_nic_priv *priv)
4175 {
4176 	u16 vector_num = priv->vector_num;
4177 	int i;
4178 
4179 	for (i = 0; i < vector_num; i++) {
4180 		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
4181 		       sizeof(struct hns3_enet_coalesce));
4182 		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
4183 		       sizeof(struct hns3_enet_coalesce));
4184 	}
4185 }
4186 
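/* DOWN reset notification: stop the netdev before the reset proceeds.
 * For an ongoing function reset the MAC address and flow director
 * entries are removed by software first.
 */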
4187 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
4188 {
4189 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4190 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4191 	struct net_device *ndev = kinfo->netdev;
4192 	struct hns3_nic_priv *priv = netdev_priv(ndev);
4193 
4194 	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
4195 		return 0;
4196 
4197 	/* It is cumbersome for hardware to pick-and-choose entries for
4198 	 * deletion from its table space, so for a function reset software
4199 	 * intervention is required to delete the entries.
4200 	 */
4201 	if (hns3_dev_ongoing_func_reset(ae_dev)) {
4202 		hns3_remove_hw_addr(ndev);
4203 		hns3_del_all_fd_rules(ndev, false);
4204 	}
4205 
4206 	if (!netif_running(ndev))
4207 		return 0;
4208 
4209 	return hns3_nic_net_stop(ndev);
4210 }
4211 
4212 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
4213 {
4214 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4215 	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
4216 	int ret = 0;
4217 
4218 	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4219 
4220 	if (netif_running(kinfo->netdev)) {
4221 		ret = hns3_nic_net_open(kinfo->netdev);
4222 		if (ret) {
4223 			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4224 			netdev_err(kinfo->netdev,
4225 				   "hns net up fail, ret=%d!\n", ret);
4226 			return ret;
4227 		}
4228 	}
4229 
4230 	return ret;
4231 }
4232 
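/* INIT reset notification: rebuild the ring, vector and coalesce state
 * after the hardware has been reinitialized.
 */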
4233 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
4234 {
4235 	struct net_device *netdev = handle->kinfo.netdev;
4236 	struct hns3_nic_priv *priv = netdev_priv(netdev);
4237 	int ret;
4238 
4239 	/* Carrier off reporting is important to ethtool even BEFORE open */
4240 	netif_carrier_off(netdev);
4241 
4242 	ret = hns3_get_ring_config(priv);
4243 	if (ret)
4244 		return ret;
4245 
4246 	ret = hns3_nic_alloc_vector_data(priv);
4247 	if (ret)
4248 		goto err_put_ring;
4249 
4250 	hns3_restore_coal(priv);
4251 
4252 	ret = hns3_nic_init_vector_data(priv);
4253 	if (ret)
4254 		goto err_dealloc_vector;
4255 
4256 	ret = hns3_init_all_ring(priv);
4257 	if (ret)
4258 		goto err_uninit_vector;
4259 
4260 	ret = hns3_client_start(handle);
4261 	if (ret) {
4262 		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
4263 		goto err_uninit_ring;
4264 	}
4265 
4266 	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
4267 
4268 	return ret;
4269 
4270 err_uninit_ring:
4271 	hns3_uninit_all_ring(priv);
4272 err_uninit_vector:
4273 	hns3_nic_uninit_vector_data(priv);
4274 err_dealloc_vector:
4275 	hns3_nic_dealloc_vector_data(priv);
4276 err_put_ring:
4277 	hns3_put_ring_config(priv);
4278 
4279 	return ret;
4280 }
4281 
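/* RESTORE reset notification: reprogram the MAC address, address lists,
 * promiscuous mode, VLAN filter and flow director rules after a reset.
 */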
4282 static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
4283 {
4284 	struct net_device *netdev = handle->kinfo.netdev;
4285 	bool vlan_filter_enable;
4286 	int ret;
4287 
4288 	ret = hns3_init_mac_addr(netdev, false);
4289 	if (ret)
4290 		return ret;
4291 
4292 	ret = hns3_recover_hw_addr(netdev);
4293 	if (ret)
4294 		return ret;
4295 
4296 	ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
4297 	if (ret)
4298 		return ret;
4299 
4300 	vlan_filter_enable = !(netdev->flags & IFF_PROMISC);
4301 	hns3_enable_vlan_filter(netdev, vlan_filter_enable);
4302 
4303 	/* The hardware table is only cleared when the PF resets */
4304 	if (!(handle->flags & HNAE3_SUPPORT_VF)) {
4305 		ret = hns3_restore_vlan(netdev);
4306 		if (ret)
4307 			return ret;
4308 	}
4309 
4310 	return hns3_restore_fd_rules(netdev);
4311 }
4312 
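/* UNINIT reset notification: release ring and vector resources before
 * the hardware is reset, saving the coalesce settings for restore.
 */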
4313 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
4314 {
4315 	struct net_device *netdev = handle->kinfo.netdev;
4316 	struct hns3_nic_priv *priv = netdev_priv(netdev);
4317 	int ret;
4318 
4319 	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
4320 		netdev_warn(netdev, "already uninitialized\n");
4321 		return 0;
4322 	}
4323 
4324 	hns3_force_clear_all_rx_ring(handle);
4325 
4326 	hns3_nic_uninit_vector_data(priv);
4327 
4328 	hns3_store_coal(priv);
4329 
4330 	ret = hns3_nic_dealloc_vector_data(priv);
4331 	if (ret)
4332 		netdev_err(netdev, "dealloc vector error\n");
4333 
4334 	ret = hns3_uninit_all_ring(priv);
4335 	if (ret)
4336 		netdev_err(netdev, "uninit ring error\n");
4337 
4338 	hns3_put_ring_config(priv);
4339 
4340 	return ret;
4341 }
4342 
4343 static int hns3_reset_notify(struct hnae3_handle *handle,
4344 			     enum hnae3_reset_notify_type type)
4345 {
4346 	int ret = 0;
4347 
4348 	switch (type) {
4349 	case HNAE3_UP_CLIENT:
4350 		ret = hns3_reset_notify_up_enet(handle);
4351 		break;
4352 	case HNAE3_DOWN_CLIENT:
4353 		ret = hns3_reset_notify_down_enet(handle);
4354 		break;
4355 	case HNAE3_INIT_CLIENT:
4356 		ret = hns3_reset_notify_init_enet(handle);
4357 		break;
4358 	case HNAE3_UNINIT_CLIENT:
4359 		ret = hns3_reset_notify_uninit_enet(handle);
4360 		break;
4361 	case HNAE3_RESTORE_CLIENT:
4362 		ret = hns3_reset_notify_restore_enet(handle);
4363 		break;
4364 	default:
4365 		break;
4366 	}
4367 
4368 	return ret;
4369 }
4370 
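/* ethtool set_channels handler: bring the rings down, ask the AE layer
 * to change the TQP number and bring the rings back up, reverting to the
 * old TQP number if the change fails.
 */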
4371 int hns3_set_channels(struct net_device *netdev,
4372 		      struct ethtool_channels *ch)
4373 {
4374 	struct hnae3_handle *h = hns3_get_handle(netdev);
4375 	struct hnae3_knic_private_info *kinfo = &h->kinfo;
4376 	bool rxfh_configured = netif_is_rxfh_configured(netdev);
4377 	u32 new_tqp_num = ch->combined_count;
4378 	u16 org_tqp_num;
4379 	int ret;
4380 
4381 	if (ch->rx_count || ch->tx_count)
4382 		return -EINVAL;
4383 
4384 	if (new_tqp_num > hns3_get_max_available_channels(h) ||
4385 	    new_tqp_num < 1) {
4386 		dev_err(&netdev->dev,
4387 			"Change tqps fail, the tqp range is from 1 to %d",
4388 			hns3_get_max_available_channels(h));
4389 		return -EINVAL;
4390 	}
4391 
4392 	if (kinfo->rss_size == new_tqp_num)
4393 		return 0;
4394 
4395 	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
4396 	if (ret)
4397 		return ret;
4398 
4399 	ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
4400 	if (ret)
4401 		return ret;
4402 
4403 	org_tqp_num = h->kinfo.num_tqps;
4404 	ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured);
4405 	if (ret) {
4406 		ret = h->ae_algo->ops->set_channels(h, org_tqp_num,
4407 						    rxfh_configured);
4408 		if (ret) {
4409 			/* Reverting to the old tqp num failed: fatal error */
4410 			dev_err(&netdev->dev,
4411 				"Revert to old tqp num fail, ret=%d", ret);
4412 			return ret;
4413 		}
4414 		dev_info(&netdev->dev,
4415 			 "Change tqp num fail, Revert to old tqp num");
4416 	}
4417 	ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
4418 	if (ret)
4419 		return ret;
4420 
4421 	return hns3_reset_notify(h, HNAE3_UP_CLIENT);
4422 }
4423 
4424 static const struct hnae3_client_ops client_ops = {
4425 	.init_instance = hns3_client_init,
4426 	.uninit_instance = hns3_client_uninit,
4427 	.link_status_change = hns3_link_status_change,
4428 	.setup_tc = hns3_client_setup_tc,
4429 	.reset_notify = hns3_reset_notify,
4430 };
4431 
4432 /* hns3_init_module - Driver registration routine
4433  * hns3_init_module is the first routine called when the driver is
4434  * loaded. It registers the HNAE3 client and the PCI driver.
4435  */
4436 static int __init hns3_init_module(void)
4437 {
4438 	int ret;
4439 
4440 	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
4441 	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
4442 
4443 	client.type = HNAE3_CLIENT_KNIC;
4444 	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
4445 		 hns3_driver_name);
4446 
4447 	client.ops = &client_ops;
4448 
4449 	INIT_LIST_HEAD(&client.node);
4450 
4451 	hns3_dbg_register_debugfs(hns3_driver_name);
4452 
4453 	ret = hnae3_register_client(&client);
4454 	if (ret)
4455 		goto err_reg_client;
4456 
4457 	ret = pci_register_driver(&hns3_driver);
4458 	if (ret)
4459 		goto err_reg_driver;
4460 
4461 	return ret;
4462 
4463 err_reg_driver:
4464 	hnae3_unregister_client(&client);
4465 err_reg_client:
4466 	hns3_dbg_unregister_debugfs();
4467 	return ret;
4468 }
4469 module_init(hns3_init_module);
4470 
4471 /* hns3_exit_module - Driver exit cleanup routine
4472  * hns3_exit_module is called just before the driver is removed
4473  * from memory.
4474  */
4475 static void __exit hns3_exit_module(void)
4476 {
4477 	pci_unregister_driver(&hns3_driver);
4478 	hnae3_unregister_client(&client);
4479 	hns3_dbg_unregister_debugfs();
4480 }
4481 module_exit(hns3_exit_module);
4482 
4483 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
4484 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4485 MODULE_LICENSE("GPL");
4486 MODULE_ALIAS("pci:hns-nic");
4487 MODULE_VERSION(HNS3_MOD_VERSION);
4488