1 /*
2  * Copyright (c) 2016~2017 Hisilicon Limited.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9 
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/interrupt.h>
13 #include <linux/if_vlan.h>
14 #include <linux/ip.h>
15 #include <linux/ipv6.h>
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/skbuff.h>
19 #include <linux/sctp.h>
20 #include <linux/vermagic.h>
21 #include <net/gre.h>
22 #include <net/pkt_cls.h>
23 #include <net/vxlan.h>
24 
25 #include "hnae3.h"
26 #include "hns3_enet.h"
27 
28 static const char hns3_driver_name[] = "hns3";
29 const char hns3_driver_version[] = VERMAGIC_STRING;
30 static const char hns3_driver_string[] =
31 			"Hisilicon Ethernet Network Driver for Hip08 Family";
32 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
33 static struct hnae3_client client;
34 
35 /* hns3_pci_tbl - PCI Device ID Table
36  *
37  * Last entry must be all 0s
38  *
39  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
40  *   Class, Class Mask, private data (not used) }
41  */
42 static const struct pci_device_id hns3_pci_tbl[] = {
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
45 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
46 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
47 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
48 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
49 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
50 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
51 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
52 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
53 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
54 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
55 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
56 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
57 	/* required last entry */
58 	{0, }
59 };
60 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
61 
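/* per-vector interrupt handler: defer all work to NAPI polling */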
62 static irqreturn_t hns3_irq_handle(int irq, void *dev)
63 {
64 	struct hns3_enet_tqp_vector *tqp_vector = dev;
65 
66 	napi_schedule(&tqp_vector->napi);
67 
68 	return IRQ_HANDLED;
69 }
70 
71 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
72 {
73 	struct hns3_enet_tqp_vector *tqp_vectors;
74 	unsigned int i;
75 
76 	for (i = 0; i < priv->vector_num; i++) {
77 		tqp_vectors = &priv->tqp_vector[i];
78 
79 		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
80 			continue;
81 
82 		/* release the irq resource */
83 		free_irq(tqp_vectors->vector_irq, tqp_vectors);
84 		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
85 	}
86 }
87 
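/* request an irq for every initialized TQP vector, naming it after the
 * netdev and whether the vector services Tx, Rx or both
 */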
88 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
89 {
90 	struct hns3_enet_tqp_vector *tqp_vectors;
91 	int txrx_int_idx = 0;
92 	int rx_int_idx = 0;
93 	int tx_int_idx = 0;
94 	unsigned int i;
95 	int ret;
96 
97 	for (i = 0; i < priv->vector_num; i++) {
98 		tqp_vectors = &priv->tqp_vector[i];
99 
100 		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
101 			continue;
102 
103 		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
104 			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
105 				 "%s-%s-%d", priv->netdev->name, "TxRx",
106 				 txrx_int_idx++);
108 		} else if (tqp_vectors->rx_group.ring) {
109 			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
110 				 "%s-%s-%d", priv->netdev->name, "Rx",
111 				 rx_int_idx++);
112 		} else if (tqp_vectors->tx_group.ring) {
113 			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
114 				 "%s-%s-%d", priv->netdev->name, "Tx",
115 				 tx_int_idx++);
116 		} else {
117 			/* Skip this unused q_vector */
118 			continue;
119 		}
120 
121 		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
122 
		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
126 		if (ret) {
127 			netdev_err(priv->netdev, "request irq(%d) fail\n",
128 				   tqp_vectors->vector_irq);
129 			return ret;
130 		}
131 
132 		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
133 	}
134 
135 	return 0;
136 }
137 
138 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
139 				 u32 mask_en)
140 {
141 	writel(mask_en, tqp_vector->mask_addr);
142 }
143 
144 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
145 {
146 	napi_enable(&tqp_vector->napi);
147 
148 	/* enable vector */
149 	hns3_mask_vector_irq(tqp_vector, 1);
150 }
151 
152 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
153 {
154 	/* disable vector */
155 	hns3_mask_vector_irq(tqp_vector, 0);
156 
157 	disable_irq(tqp_vector->vector_irq);
158 	napi_disable(&tqp_vector->napi);
159 }
160 
161 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
162 				 u32 rl_value)
163 {
164 	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
165 
	/* This defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second. GL and RL are two ways to achieve interrupt coalescing.
	 */
170 
171 	if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
172 	    !tqp_vector->rx_group.gl_adapt_enable)
173 		/* According to the hardware, the range of rl_reg is
174 		 * 0-59 and the unit is 4.
175 		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;
177 
178 	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
179 }
180 
181 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
182 				    u32 gl_value)
183 {
184 	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
185 
186 	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
187 }
188 
189 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
190 				    u32 gl_value)
191 {
192 	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
193 
194 	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
195 }
196 
197 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
198 				   struct hns3_nic_priv *priv)
199 {
200 	struct hnae3_handle *h = priv->ae_handle;
201 
202 	/* initialize the configuration for interrupt coalescing.
203 	 * 1. GL (Interrupt Gap Limiter)
204 	 * 2. RL (Interrupt Rate Limiter)
205 	 */
206 
207 	/* Default: enable interrupt coalescing self-adaptive and GL */
208 	tqp_vector->tx_group.gl_adapt_enable = 1;
209 	tqp_vector->rx_group.gl_adapt_enable = 1;
210 
211 	tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
212 	tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
213 
214 	hns3_set_vector_coalesce_tx_gl(tqp_vector,
215 				       tqp_vector->tx_group.int_gl);
216 	hns3_set_vector_coalesce_rx_gl(tqp_vector,
217 				       tqp_vector->rx_group.int_gl);
218 
219 	/* Default: disable RL */
220 	h->kinfo.int_rl_setting = 0;
221 	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
222 
223 	tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
224 	tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
225 }
226 
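/* tell the stack how many Tx/Rx queues are actually in use:
 * rss_size queues per enabled traffic class
 */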
227 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
228 {
229 	struct hnae3_handle *h = hns3_get_handle(netdev);
230 	struct hnae3_knic_private_info *kinfo = &h->kinfo;
231 	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
232 	int ret;
233 
234 	ret = netif_set_real_num_tx_queues(netdev, queue_size);
235 	if (ret) {
236 		netdev_err(netdev,
237 			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
238 			   ret);
239 		return ret;
240 	}
241 
242 	ret = netif_set_real_num_rx_queues(netdev, queue_size);
243 	if (ret) {
244 		netdev_err(netdev,
245 			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
246 		return ret;
247 	}
248 
249 	return 0;
250 }
251 
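/* bring the hardware up: request irq resources, enable all vectors and
 * start the associated ae_dev
 */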
252 static int hns3_nic_net_up(struct net_device *netdev)
253 {
254 	struct hns3_nic_priv *priv = netdev_priv(netdev);
255 	struct hnae3_handle *h = priv->ae_handle;
256 	int i, j;
257 	int ret;
258 
259 	/* get irq resource for all vectors */
260 	ret = hns3_nic_init_irq(priv);
261 	if (ret) {
262 		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
263 		return ret;
264 	}
265 
266 	/* enable the vectors */
267 	for (i = 0; i < priv->vector_num; i++)
268 		hns3_vector_enable(&priv->tqp_vector[i]);
269 
270 	/* start the ae_dev */
271 	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
272 	if (ret)
273 		goto out_start_err;
274 
275 	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
276 
277 	return 0;
278 
279 out_start_err:
280 	for (j = i - 1; j >= 0; j--)
281 		hns3_vector_disable(&priv->tqp_vector[j]);
282 
283 	hns3_nic_uninit_irq(priv);
284 
285 	return ret;
286 }
287 
288 static int hns3_nic_net_open(struct net_device *netdev)
289 {
290 	struct hns3_nic_priv *priv = netdev_priv(netdev);
291 	int ret;
292 
293 	netif_carrier_off(netdev);
294 
295 	ret = hns3_nic_set_real_num_queue(netdev);
296 	if (ret)
297 		return ret;
298 
299 	ret = hns3_nic_net_up(netdev);
300 	if (ret) {
301 		netdev_err(netdev,
302 			   "hns net up fail, ret=%d!\n", ret);
303 		return ret;
304 	}
305 
306 	priv->last_reset_time = jiffies;
307 	return 0;
308 }
309 
310 static void hns3_nic_net_down(struct net_device *netdev)
311 {
312 	struct hns3_nic_priv *priv = netdev_priv(netdev);
313 	const struct hnae3_ae_ops *ops;
314 	int i;
315 
316 	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
317 		return;
318 
319 	/* stop ae_dev */
320 	ops = priv->ae_handle->ae_algo->ops;
321 	if (ops->stop)
322 		ops->stop(priv->ae_handle);
323 
324 	/* disable vectors */
325 	for (i = 0; i < priv->vector_num; i++)
326 		hns3_vector_disable(&priv->tqp_vector[i]);
327 
328 	/* free irq resources */
329 	hns3_nic_uninit_irq(priv);
330 }
331 
332 static int hns3_nic_net_stop(struct net_device *netdev)
333 {
334 	netif_tx_stop_all_queues(netdev);
335 	netif_carrier_off(netdev);
336 
337 	hns3_nic_net_down(netdev);
338 
339 	return 0;
340 }
341 
342 static int hns3_nic_uc_sync(struct net_device *netdev,
343 			    const unsigned char *addr)
344 {
345 	struct hnae3_handle *h = hns3_get_handle(netdev);
346 
347 	if (h->ae_algo->ops->add_uc_addr)
348 		return h->ae_algo->ops->add_uc_addr(h, addr);
349 
350 	return 0;
351 }
352 
353 static int hns3_nic_uc_unsync(struct net_device *netdev,
354 			      const unsigned char *addr)
355 {
356 	struct hnae3_handle *h = hns3_get_handle(netdev);
357 
358 	if (h->ae_algo->ops->rm_uc_addr)
359 		return h->ae_algo->ops->rm_uc_addr(h, addr);
360 
361 	return 0;
362 }
363 
364 static int hns3_nic_mc_sync(struct net_device *netdev,
365 			    const unsigned char *addr)
366 {
367 	struct hnae3_handle *h = hns3_get_handle(netdev);
368 
369 	if (h->ae_algo->ops->add_mc_addr)
370 		return h->ae_algo->ops->add_mc_addr(h, addr);
371 
372 	return 0;
373 }
374 
375 static int hns3_nic_mc_unsync(struct net_device *netdev,
376 			      const unsigned char *addr)
377 {
378 	struct hnae3_handle *h = hns3_get_handle(netdev);
379 
380 	if (h->ae_algo->ops->rm_mc_addr)
381 		return h->ae_algo->ops->rm_mc_addr(h, addr);
382 
383 	return 0;
384 }
385 
386 static void hns3_nic_set_rx_mode(struct net_device *netdev)
387 {
388 	struct hnae3_handle *h = hns3_get_handle(netdev);
389 
390 	if (h->ae_algo->ops->set_promisc_mode) {
391 		if (netdev->flags & IFF_PROMISC)
392 			h->ae_algo->ops->set_promisc_mode(h, 1);
393 		else
394 			h->ae_algo->ops->set_promisc_mode(h, 0);
395 	}
396 	if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
397 		netdev_err(netdev, "sync uc address fail\n");
398 	if (netdev->flags & IFF_MULTICAST)
399 		if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
400 			netdev_err(netdev, "sync mc address fail\n");
401 }
402 
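/* prepare an skb for TSO: clear the checksums that hardware recomputes,
 * remove the payload length from the TCP pseudo checksum and fill the
 * paylen, MSS and TSO fields used for the Tx BD
 */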
403 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
404 			u16 *mss, u32 *type_cs_vlan_tso)
405 {
406 	u32 l4_offset, hdr_len;
407 	union l3_hdr_info l3;
408 	union l4_hdr_info l4;
409 	u32 l4_paylen;
410 	int ret;
411 
412 	if (!skb_is_gso(skb))
413 		return 0;
414 
415 	ret = skb_cow_head(skb, 0);
416 	if (ret)
417 		return ret;
418 
419 	l3.hdr = skb_network_header(skb);
420 	l4.hdr = skb_transport_header(skb);
421 
422 	/* Software should clear the IPv4's checksum field when tso is
423 	 * needed.
424 	 */
425 	if (l3.v4->version == 4)
426 		l3.v4->check = 0;
427 
	/* tunnel packet */
429 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
430 					 SKB_GSO_GRE_CSUM |
431 					 SKB_GSO_UDP_TUNNEL |
432 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
433 		if ((!(skb_shinfo(skb)->gso_type &
434 		    SKB_GSO_PARTIAL)) &&
435 		    (skb_shinfo(skb)->gso_type &
436 		    SKB_GSO_UDP_TUNNEL_CSUM)) {
437 			/* Software should clear the udp's checksum
438 			 * field when tso is needed.
439 			 */
440 			l4.udp->check = 0;
441 		}
442 		/* reset l3&l4 pointers from outer to inner headers */
443 		l3.hdr = skb_inner_network_header(skb);
444 		l4.hdr = skb_inner_transport_header(skb);
445 
446 		/* Software should clear the IPv4's checksum field when
447 		 * tso is needed.
448 		 */
449 		if (l3.v4->version == 4)
450 			l3.v4->check = 0;
451 	}
452 
	/* normal or tunnel packet */
454 	l4_offset = l4.hdr - skb->data;
455 	hdr_len = (l4.tcp->doff * 4) + l4_offset;
456 
	/* remove the payload length from the inner pseudo checksum for TSO */
458 	l4_paylen = skb->len - l4_offset;
459 	csum_replace_by_diff(&l4.tcp->check,
460 			     (__force __wsum)htonl(l4_paylen));
461 
462 	/* find the txbd field values */
463 	*paylen = skb->len - hdr_len;
464 	hnae_set_bit(*type_cs_vlan_tso,
465 		     HNS3_TXD_TSO_B, 1);
466 
467 	/* get MSS for TSO */
468 	*mss = skb_shinfo(skb)->gso_size;
469 
470 	return 0;
471 }
472 
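/* extract the outer L4 protocol and, for encapsulated skbs, the inner
 * L4 protocol, skipping IPv6 extension headers where necessary
 */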
473 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
474 				u8 *il4_proto)
475 {
476 	union {
477 		struct iphdr *v4;
478 		struct ipv6hdr *v6;
479 		unsigned char *hdr;
480 	} l3;
481 	unsigned char *l4_hdr;
482 	unsigned char *exthdr;
483 	u8 l4_proto_tmp;
484 	__be16 frag_off;
485 
	/* find the outer header pointers */
487 	l3.hdr = skb_network_header(skb);
488 	l4_hdr = skb_inner_transport_header(skb);
489 
490 	if (skb->protocol == htons(ETH_P_IPV6)) {
491 		exthdr = l3.hdr + sizeof(*l3.v6);
492 		l4_proto_tmp = l3.v6->nexthdr;
493 		if (l4_hdr != exthdr)
494 			ipv6_skip_exthdr(skb, exthdr - skb->data,
495 					 &l4_proto_tmp, &frag_off);
496 	} else if (skb->protocol == htons(ETH_P_IP)) {
497 		l4_proto_tmp = l3.v4->protocol;
498 	} else {
499 		return -EINVAL;
500 	}
501 
502 	*ol4_proto = l4_proto_tmp;
503 
	/* not a tunnel packet, so there is no inner L4 protocol */
505 	if (!skb->encapsulation) {
506 		*il4_proto = 0;
507 		return 0;
508 	}
509 
	/* find the inner header pointers */
511 	l3.hdr = skb_inner_network_header(skb);
512 	l4_hdr = skb_inner_transport_header(skb);
513 
514 	if (l3.v6->version == 6) {
515 		exthdr = l3.hdr + sizeof(*l3.v6);
516 		l4_proto_tmp = l3.v6->nexthdr;
517 		if (l4_hdr != exthdr)
518 			ipv6_skip_exthdr(skb, exthdr - skb->data,
519 					 &l4_proto_tmp, &frag_off);
520 	} else if (l3.v4->version == 4) {
521 		l4_proto_tmp = l3.v4->protocol;
522 	}
523 
524 	*il4_proto = l4_proto_tmp;
525 
526 	return 0;
527 }
528 
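/* fill the (outer and inner) L2/L3/L4 header length fields of the
 * Tx BD in the units expected by hardware
 */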
529 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
530 				u8 il4_proto, u32 *type_cs_vlan_tso,
531 				u32 *ol_type_vlan_len_msec)
532 {
533 	union {
534 		struct iphdr *v4;
535 		struct ipv6hdr *v6;
536 		unsigned char *hdr;
537 	} l3;
538 	union {
539 		struct tcphdr *tcp;
540 		struct udphdr *udp;
541 		struct gre_base_hdr *gre;
542 		unsigned char *hdr;
543 	} l4;
544 	unsigned char *l2_hdr;
545 	u8 l4_proto = ol4_proto;
546 	u32 ol2_len;
547 	u32 ol3_len;
548 	u32 ol4_len;
549 	u32 l2_len;
550 	u32 l3_len;
551 
552 	l3.hdr = skb_network_header(skb);
553 	l4.hdr = skb_transport_header(skb);
554 
555 	/* compute L2 header size for normal packet, defined in 2 Bytes */
556 	l2_len = l3.hdr - skb->data;
557 	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
558 		       HNS3_TXD_L2LEN_S, l2_len >> 1);
559 
	/* tunnel packet */
561 	if (skb->encapsulation) {
562 		/* compute OL2 header size, defined in 2 Bytes */
563 		ol2_len = l2_len;
564 		hnae_set_field(*ol_type_vlan_len_msec,
565 			       HNS3_TXD_L2LEN_M,
566 			       HNS3_TXD_L2LEN_S, ol2_len >> 1);
567 
568 		/* compute OL3 header size, defined in 4 Bytes */
569 		ol3_len = l4.hdr - l3.hdr;
570 		hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
571 			       HNS3_TXD_L3LEN_S, ol3_len >> 2);
572 
		/* MAC in UDP, MAC in GRE (0x6558) */
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch the MAC header ptr from outer to inner header */
576 			l2_hdr = skb_inner_mac_header(skb);
577 
578 			/* compute OL4 header size, defined in 4 Bytes. */
579 			ol4_len = l2_hdr - l4.hdr;
580 			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
581 				       HNS3_TXD_L4LEN_S, ol4_len >> 2);
582 
583 			/* switch IP header ptr from outer to inner header */
584 			l3.hdr = skb_inner_network_header(skb);
585 
586 			/* compute inner l2 header size, defined in 2 Bytes. */
587 			l2_len = l3.hdr - l2_hdr;
588 			hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
589 				       HNS3_TXD_L2LEN_S, l2_len >> 1);
590 		} else {
			/* skb packet types not supported by hardware,
			 * so the txbd length field is not filled.
			 */
594 			return;
595 		}
596 
597 		/* switch L4 header pointer from outer to inner */
598 		l4.hdr = skb_inner_transport_header(skb);
599 
600 		l4_proto = il4_proto;
601 	}
602 
603 	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
604 	l3_len = l4.hdr - l3.hdr;
605 	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
606 		       HNS3_TXD_L3LEN_S, l3_len >> 2);
607 
608 	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
609 	switch (l4_proto) {
610 	case IPPROTO_TCP:
611 		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
612 			       HNS3_TXD_L4LEN_S, l4.tcp->doff);
613 		break;
614 	case IPPROTO_SCTP:
615 		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
616 			       HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
617 		break;
618 	case IPPROTO_UDP:
619 		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
620 			       HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
621 		break;
622 	default:
		/* skb packet types not supported by hardware,
		 * so the txbd length field is not filled.
		 */
626 		return;
627 	}
628 }
629 
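/* fill the L3/L4 type, tunnel type and checksum offload bits of the
 * Tx BD; unsupported tunnel or L4 types fall back to software
 * checksumming, or are rejected when TSO is requested
 */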
630 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
631 				   u8 il4_proto, u32 *type_cs_vlan_tso,
632 				   u32 *ol_type_vlan_len_msec)
633 {
634 	union {
635 		struct iphdr *v4;
636 		struct ipv6hdr *v6;
637 		unsigned char *hdr;
638 	} l3;
639 	u32 l4_proto = ol4_proto;
640 
641 	l3.hdr = skb_network_header(skb);
642 
	/* define the OL3 type and tunnel type (OL4) */
	if (skb->encapsulation) {
		/* define the outer network header type */
646 		if (skb->protocol == htons(ETH_P_IP)) {
647 			if (skb_is_gso(skb))
648 				hnae_set_field(*ol_type_vlan_len_msec,
649 					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
650 					       HNS3_OL3T_IPV4_CSUM);
651 			else
652 				hnae_set_field(*ol_type_vlan_len_msec,
653 					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
654 					       HNS3_OL3T_IPV4_NO_CSUM);
655 
656 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
657 			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
658 				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
659 		}
660 
		/* define the tunnel type (OL4) */
662 		switch (l4_proto) {
663 		case IPPROTO_UDP:
664 			hnae_set_field(*ol_type_vlan_len_msec,
665 				       HNS3_TXD_TUNTYPE_M,
666 				       HNS3_TXD_TUNTYPE_S,
667 				       HNS3_TUN_MAC_IN_UDP);
668 			break;
669 		case IPPROTO_GRE:
670 			hnae_set_field(*ol_type_vlan_len_msec,
671 				       HNS3_TXD_TUNTYPE_M,
672 				       HNS3_TXD_TUNTYPE_S,
673 				       HNS3_TUN_NVGRE);
674 			break;
675 		default:
			/* drop the skb if hardware does not support this
			 * tunnel type, because it cannot compute the
			 * checksum for TSO packets.
			 */
679 			if (skb_is_gso(skb))
680 				return -EDOM;
681 
			/* the stack has already computed the IP header
			 * checksum; let software compute the L4 checksum
			 * when not doing TSO.
			 */
685 			skb_checksum_help(skb);
686 			return 0;
687 		}
688 
689 		l3.hdr = skb_inner_network_header(skb);
690 		l4_proto = il4_proto;
691 	}
692 
693 	if (l3.v4->version == 4) {
694 		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
695 			       HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
696 
697 		/* the stack computes the IP header already, the only time we
698 		 * need the hardware to recompute it is in the case of TSO.
699 		 */
700 		if (skb_is_gso(skb))
701 			hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
702 
703 		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
704 	} else if (l3.v6->version == 6) {
705 		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
706 			       HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
707 		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
708 	}
709 
710 	switch (l4_proto) {
711 	case IPPROTO_TCP:
712 		hnae_set_field(*type_cs_vlan_tso,
713 			       HNS3_TXD_L4T_M,
714 			       HNS3_TXD_L4T_S,
715 			       HNS3_L4T_TCP);
716 		break;
717 	case IPPROTO_UDP:
718 		hnae_set_field(*type_cs_vlan_tso,
719 			       HNS3_TXD_L4T_M,
720 			       HNS3_TXD_L4T_S,
721 			       HNS3_L4T_UDP);
722 		break;
723 	case IPPROTO_SCTP:
724 		hnae_set_field(*type_cs_vlan_tso,
725 			       HNS3_TXD_L4T_M,
726 			       HNS3_TXD_L4T_S,
727 			       HNS3_L4T_SCTP);
728 		break;
729 	default:
		/* drop the skb if hardware does not support this L4
		 * protocol, because it cannot compute the checksum
		 * for TSO packets.
		 */
733 		if (skb_is_gso(skb))
734 			return -EDOM;
735 
		/* the stack has already computed the IP header
		 * checksum; let software compute the L4 checksum
		 * when not doing TSO.
		 */
739 		skb_checksum_help(skb);
740 		return 0;
741 	}
742 
743 	return 0;
744 }
745 
746 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
747 {
748 	/* Config bd buffer end */
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
		       HNS3_TXD_BDTYPE_S, 0);
751 	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
752 	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
753 	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
754 }
755 
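/* fill the inner and outer VLAN tag fields of the Tx BD based on the
 * skb VLAN state and the HW VLAN Tx offload setting
 */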
756 static int hns3_fill_desc_vtags(struct sk_buff *skb,
757 				struct hns3_enet_ring *tx_ring,
758 				u32 *inner_vlan_flag,
759 				u32 *out_vlan_flag,
760 				u16 *inner_vtag,
761 				u16 *out_vtag)
762 {
763 #define HNS3_TX_VLAN_PRIO_SHIFT 13
764 
765 	if (skb->protocol == htons(ETH_P_8021Q) &&
766 	    !(tx_ring->tqp->handle->kinfo.netdev->features &
767 	    NETIF_F_HW_VLAN_CTAG_TX)) {
768 		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1Q, the driver just needs to
770 		 * set the protocol to the encapsulated ethertype.
771 		 */
772 		skb->protocol = vlan_get_protocol(skb);
773 		return 0;
774 	}
775 
776 	if (skb_vlan_tag_present(skb)) {
777 		u16 vlan_tag;
778 
779 		vlan_tag = skb_vlan_tag_get(skb);
780 		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
781 
782 		/* Based on hw strategy, use out_vtag in two layer tag case,
783 		 * and use inner_vtag in one tag case.
784 		 */
785 		if (skb->protocol == htons(ETH_P_8021Q)) {
786 			hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
787 			*out_vtag = vlan_tag;
788 		} else {
789 			hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
790 			*inner_vtag = vlan_tag;
791 		}
792 	} else if (skb->protocol == htons(ETH_P_8021Q)) {
793 		struct vlan_ethhdr *vhdr;
794 		int rc;
795 
796 		rc = skb_cow_head(skb, 0);
797 		if (rc < 0)
798 			return rc;
799 		vhdr = (struct vlan_ethhdr *)skb->data;
800 		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
801 					<< HNS3_TX_VLAN_PRIO_SHIFT);
802 	}
803 
804 	skb->protocol = vlan_get_protocol(skb);
805 	return 0;
806 }
807 
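/* fill one Tx BD for a linear buffer or a page fragment; for the skb
 * head (DESC_TYPE_SKB) this also sets up the VLAN, checksum and TSO
 * related descriptor fields
 */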
808 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
809 			  int size, dma_addr_t dma, int frag_end,
810 			  enum hns_desc_type type)
811 {
812 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
813 	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
814 	u32 ol_type_vlan_len_msec = 0;
815 	u16 bdtp_fe_sc_vld_ra_ri = 0;
816 	u32 type_cs_vlan_tso = 0;
817 	struct sk_buff *skb;
818 	u16 inner_vtag = 0;
819 	u16 out_vtag = 0;
820 	u32 paylen = 0;
821 	u16 mss = 0;
822 	__be16 protocol;
823 	u8 ol4_proto;
824 	u8 il4_proto;
825 	int ret;
826 
827 	/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
828 	desc_cb->priv = priv;
829 	desc_cb->length = size;
830 	desc_cb->dma = dma;
831 	desc_cb->type = type;
832 
833 	/* now, fill the descriptor */
834 	desc->addr = cpu_to_le64(dma);
835 	desc->tx.send_size = cpu_to_le16((u16)size);
836 	hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
837 	desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
838 
839 	if (type == DESC_TYPE_SKB) {
840 		skb = (struct sk_buff *)priv;
841 		paylen = skb->len;
842 
843 		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
844 					   &ol_type_vlan_len_msec,
845 					   &inner_vtag, &out_vtag);
846 		if (unlikely(ret))
847 			return ret;
848 
849 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
850 			skb_reset_mac_len(skb);
851 			protocol = skb->protocol;
852 
853 			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
854 			if (ret)
855 				return ret;
856 			hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
857 					    &type_cs_vlan_tso,
858 					    &ol_type_vlan_len_msec);
859 			ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
860 						      &type_cs_vlan_tso,
861 						      &ol_type_vlan_len_msec);
862 			if (ret)
863 				return ret;
864 
865 			ret = hns3_set_tso(skb, &paylen, &mss,
866 					   &type_cs_vlan_tso);
867 			if (ret)
868 				return ret;
869 		}
870 
871 		/* Set txbd */
872 		desc->tx.ol_type_vlan_len_msec =
873 			cpu_to_le32(ol_type_vlan_len_msec);
874 		desc->tx.type_cs_vlan_tso_len =
875 			cpu_to_le32(type_cs_vlan_tso);
876 		desc->tx.paylen = cpu_to_le32(paylen);
877 		desc->tx.mss = cpu_to_le16(mss);
878 		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
879 		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
880 	}
881 
	/* move the ring pointer to the next descriptor */
883 	ring_ptr_move_fw(ring, next_to_use);
884 
885 	return 0;
886 }
887 
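/* variant of hns3_fill_desc() that splits buffers larger than
 * HNS3_MAX_BD_SIZE across several BDs
 */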
888 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
889 			      int size, dma_addr_t dma, int frag_end,
890 			      enum hns_desc_type type)
891 {
892 	unsigned int frag_buf_num;
893 	unsigned int k;
894 	int sizeoflast;
895 	int ret;
896 
897 	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
898 	sizeoflast = size % HNS3_MAX_BD_SIZE;
899 	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
900 
	/* when the frag size is bigger than the hardware limit, split it */
902 	for (k = 0; k < frag_buf_num; k++) {
903 		ret = hns3_fill_desc(ring, priv,
904 				     (k == frag_buf_num - 1) ?
905 				sizeoflast : HNS3_MAX_BD_SIZE,
906 				dma + HNS3_MAX_BD_SIZE * k,
907 				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
908 				(type == DESC_TYPE_SKB && !k) ?
909 					DESC_TYPE_SKB : DESC_TYPE_PAGE);
910 		if (ret)
911 			return ret;
912 	}
913 
914 	return 0;
915 }
916 
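/* count the BDs needed by a (possibly TSO) skb and check whether the
 * ring has enough free descriptors for it
 */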
917 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
918 				   struct hns3_enet_ring *ring)
919 {
920 	struct sk_buff *skb = *out_skb;
921 	struct skb_frag_struct *frag;
922 	int bdnum_for_frag;
923 	int frag_num;
924 	int buf_num;
925 	int size;
926 	int i;
927 
928 	size = skb_headlen(skb);
929 	buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
930 
931 	frag_num = skb_shinfo(skb)->nr_frags;
932 	for (i = 0; i < frag_num; i++) {
933 		frag = &skb_shinfo(skb)->frags[i];
934 		size = skb_frag_size(frag);
935 		bdnum_for_frag =
936 			(size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
937 		if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
938 			return -ENOMEM;
939 
940 		buf_num += bdnum_for_frag;
941 	}
942 
943 	if (buf_num > ring_space(ring))
944 		return -EBUSY;
945 
946 	*bnum = buf_num;
947 	return 0;
948 }
949 
950 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
951 				  struct hns3_enet_ring *ring)
952 {
953 	struct sk_buff *skb = *out_skb;
954 	int buf_num;
955 
956 	/* No. of segments (plus a header) */
957 	buf_num = skb_shinfo(skb)->nr_frags + 1;
958 
959 	if (buf_num > ring_space(ring))
960 		return -EBUSY;
961 
962 	*bnum = buf_num;
963 
964 	return 0;
965 }
966 
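/* error-path helper: unmap every buffer mapped since next_to_use_orig
 * and roll the ring pointer back to it
 */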
967 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
968 {
969 	struct device *dev = ring_to_dev(ring);
970 	unsigned int i;
971 
972 	for (i = 0; i < ring->desc_num; i++) {
973 		/* check if this is where we started */
974 		if (ring->next_to_use == next_to_use_orig)
975 			break;
976 
977 		/* unmap the descriptor dma address */
978 		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
979 			dma_unmap_single(dev,
980 					 ring->desc_cb[ring->next_to_use].dma,
981 					ring->desc_cb[ring->next_to_use].length,
982 					DMA_TO_DEVICE);
983 		else
984 			dma_unmap_page(dev,
985 				       ring->desc_cb[ring->next_to_use].dma,
986 				       ring->desc_cb[ring->next_to_use].length,
987 				       DMA_TO_DEVICE);
988 
989 		/* rollback one */
990 		ring_ptr_move_bw(ring, next_to_use);
991 	}
992 }
993 
994 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
995 {
996 	struct hns3_nic_priv *priv = netdev_priv(netdev);
997 	struct hns3_nic_ring_data *ring_data =
998 		&tx_ring_data(priv, skb->queue_mapping);
999 	struct hns3_enet_ring *ring = ring_data->ring;
1000 	struct device *dev = priv->dev;
1001 	struct netdev_queue *dev_queue;
1002 	struct skb_frag_struct *frag;
1003 	int next_to_use_head;
1004 	int next_to_use_frag;
1005 	dma_addr_t dma;
1006 	int buf_num;
1007 	int seg_num;
1008 	int size;
1009 	int ret;
1010 	int i;
1011 
1012 	/* Prefetch the data used later */
1013 	prefetch(skb->data);
1014 
1015 	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
1016 	case -EBUSY:
1017 		u64_stats_update_begin(&ring->syncp);
1018 		ring->stats.tx_busy++;
1019 		u64_stats_update_end(&ring->syncp);
1020 
1021 		goto out_net_tx_busy;
1022 	case -ENOMEM:
1023 		u64_stats_update_begin(&ring->syncp);
1024 		ring->stats.sw_err_cnt++;
1025 		u64_stats_update_end(&ring->syncp);
1026 		netdev_err(netdev, "no memory to xmit!\n");
1027 
1028 		goto out_err_tx_ok;
1029 	default:
1030 		break;
1031 	}
1032 
1033 	/* No. of segments (plus a header) */
1034 	seg_num = skb_shinfo(skb)->nr_frags + 1;
1035 	/* Fill the first part */
1036 	size = skb_headlen(skb);
1037 
1038 	next_to_use_head = ring->next_to_use;
1039 
1040 	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1041 	if (dma_mapping_error(dev, dma)) {
1042 		netdev_err(netdev, "TX head DMA map failed\n");
1043 		ring->stats.sw_err_cnt++;
1044 		goto out_err_tx_ok;
1045 	}
1046 
1047 	ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
1048 			   DESC_TYPE_SKB);
1049 	if (ret)
1050 		goto head_dma_map_err;
1051 
1052 	next_to_use_frag = ring->next_to_use;
1053 	/* Fill the fragments */
1054 	for (i = 1; i < seg_num; i++) {
1055 		frag = &skb_shinfo(skb)->frags[i - 1];
1056 		size = skb_frag_size(frag);
1057 		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1058 		if (dma_mapping_error(dev, dma)) {
1059 			netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
1060 			ring->stats.sw_err_cnt++;
1061 			goto frag_dma_map_err;
1062 		}
1063 		ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
1064 				    seg_num - 1 == i ? 1 : 0,
1065 				    DESC_TYPE_PAGE);
1066 
1067 		if (ret)
1068 			goto frag_dma_map_err;
1069 	}
1070 
	/* all fragments are filled, notify the stack and ring the doorbell */
1072 	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1073 	netdev_tx_sent_queue(dev_queue, skb->len);
1074 
1075 	wmb(); /* Commit all data before submit */
1076 
1077 	hnae_queue_xmit(ring->tqp, buf_num);
1078 
1079 	return NETDEV_TX_OK;
1080 
1081 frag_dma_map_err:
1082 	hns_nic_dma_unmap(ring, next_to_use_frag);
1083 
1084 head_dma_map_err:
1085 	hns_nic_dma_unmap(ring, next_to_use_head);
1086 
1087 out_err_tx_ok:
1088 	dev_kfree_skb_any(skb);
1089 	return NETDEV_TX_OK;
1090 
1091 out_net_tx_busy:
1092 	netif_stop_subqueue(netdev, ring_data->queue_index);
	smp_mb(); /* make the queue-stopped state visible to the completion path */
1094 
1095 	return NETDEV_TX_BUSY;
1096 }
1097 
1098 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1099 {
1100 	struct hnae3_handle *h = hns3_get_handle(netdev);
1101 	struct sockaddr *mac_addr = p;
1102 	int ret;
1103 
1104 	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1105 		return -EADDRNOTAVAIL;
1106 
1107 	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1108 	if (ret) {
1109 		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1110 		return ret;
1111 	}
1112 
1113 	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1114 
1115 	return 0;
1116 }
1117 
1118 static int hns3_nic_set_features(struct net_device *netdev,
1119 				 netdev_features_t features)
1120 {
1121 	netdev_features_t changed = netdev->features ^ features;
1122 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1123 	struct hnae3_handle *h = priv->ae_handle;
1124 	int ret;
1125 
1126 	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1127 		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1128 			priv->ops.fill_desc = hns3_fill_desc_tso;
1129 			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1130 		} else {
1131 			priv->ops.fill_desc = hns3_fill_desc;
1132 			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1133 		}
1134 	}
1135 
1136 	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1137 	    h->ae_algo->ops->enable_vlan_filter) {
1138 		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1139 			h->ae_algo->ops->enable_vlan_filter(h, true);
1140 		else
1141 			h->ae_algo->ops->enable_vlan_filter(h, false);
1142 	}
1143 
1144 	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1145 	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
1146 		if (features & NETIF_F_HW_VLAN_CTAG_RX)
1147 			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
1148 		else
1149 			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
1150 
1151 		if (ret)
1152 			return ret;
1153 	}
1154 
1155 	netdev->features = features;
1156 	return 0;
1157 }
1158 
1159 static void hns3_nic_get_stats64(struct net_device *netdev,
1160 				 struct rtnl_link_stats64 *stats)
1161 {
1162 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1163 	int queue_num = priv->ae_handle->kinfo.num_tqps;
1164 	struct hnae3_handle *handle = priv->ae_handle;
1165 	struct hns3_enet_ring *ring;
1166 	unsigned int start;
1167 	unsigned int idx;
1168 	u64 tx_bytes = 0;
1169 	u64 rx_bytes = 0;
1170 	u64 tx_pkts = 0;
1171 	u64 rx_pkts = 0;
1172 	u64 tx_drop = 0;
1173 	u64 rx_drop = 0;
1174 
1175 	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1176 		return;
1177 
1178 	handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1179 
1180 	for (idx = 0; idx < queue_num; idx++) {
1181 		/* fetch the tx stats */
1182 		ring = priv->ring_data[idx].ring;
1183 		do {
1184 			start = u64_stats_fetch_begin_irq(&ring->syncp);
1185 			tx_bytes += ring->stats.tx_bytes;
1186 			tx_pkts += ring->stats.tx_pkts;
1187 			tx_drop += ring->stats.tx_busy;
1188 			tx_drop += ring->stats.sw_err_cnt;
1189 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1190 
1191 		/* fetch the rx stats */
1192 		ring = priv->ring_data[idx + queue_num].ring;
1193 		do {
1194 			start = u64_stats_fetch_begin_irq(&ring->syncp);
1195 			rx_bytes += ring->stats.rx_bytes;
1196 			rx_pkts += ring->stats.rx_pkts;
1197 			rx_drop += ring->stats.non_vld_descs;
1198 			rx_drop += ring->stats.err_pkt_len;
1199 			rx_drop += ring->stats.l2_err;
1200 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1201 	}
1202 
1203 	stats->tx_bytes = tx_bytes;
1204 	stats->tx_packets = tx_pkts;
1205 	stats->rx_bytes = rx_bytes;
1206 	stats->rx_packets = rx_pkts;
1207 
1208 	stats->rx_errors = netdev->stats.rx_errors;
1209 	stats->multicast = netdev->stats.multicast;
1210 	stats->rx_length_errors = netdev->stats.rx_length_errors;
1211 	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1212 	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1213 
1214 	stats->tx_errors = netdev->stats.tx_errors;
1215 	stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1216 	stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
1217 	stats->collisions = netdev->stats.collisions;
1218 	stats->rx_over_errors = netdev->stats.rx_over_errors;
1219 	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1220 	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1221 	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1222 	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1223 	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1224 	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1225 	stats->tx_window_errors = netdev->stats.tx_window_errors;
1226 	stats->rx_compressed = netdev->stats.rx_compressed;
1227 	stats->tx_compressed = netdev->stats.tx_compressed;
1228 }
1229 
1230 static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1231 				 enum hns3_udp_tnl_type type)
1232 {
1233 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1234 	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1235 	struct hnae3_handle *h = priv->ae_handle;
1236 
1237 	if (udp_tnl->used && udp_tnl->dst_port == port) {
1238 		udp_tnl->used++;
1239 		return;
1240 	}
1241 
1242 	if (udp_tnl->used) {
1243 		netdev_warn(netdev,
1244 			    "UDP tunnel [%d], port [%d] offload\n", type, port);
1245 		return;
1246 	}
1247 
1248 	udp_tnl->dst_port = port;
1249 	udp_tnl->used = 1;
1250 	/* TBD send command to hardware to add port */
1251 	if (h->ae_algo->ops->add_tunnel_udp)
1252 		h->ae_algo->ops->add_tunnel_udp(h, port);
1253 }
1254 
1255 static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1256 				 enum hns3_udp_tnl_type type)
1257 {
1258 	struct hns3_nic_priv *priv = netdev_priv(netdev);
1259 	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1260 	struct hnae3_handle *h = priv->ae_handle;
1261 
1262 	if (!udp_tnl->used || udp_tnl->dst_port != port) {
1263 		netdev_warn(netdev,
1264 			    "Invalid UDP tunnel port %d\n", port);
1265 		return;
1266 	}
1267 
1268 	udp_tnl->used--;
1269 	if (udp_tnl->used)
1270 		return;
1271 
1272 	udp_tnl->dst_port = 0;
1273 	/* TBD send command to hardware to del port  */
1274 	if (h->ae_algo->ops->del_tunnel_udp)
1275 		h->ae_algo->ops->del_tunnel_udp(h, port);
1276 }
1277 
/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
 * @netdev: This physical port's netdev
1280  * @ti: Tunnel information
1281  */
1282 static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1283 				    struct udp_tunnel_info *ti)
1284 {
1285 	u16 port_n = ntohs(ti->port);
1286 
1287 	switch (ti->type) {
1288 	case UDP_TUNNEL_TYPE_VXLAN:
1289 		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1290 		break;
1291 	case UDP_TUNNEL_TYPE_GENEVE:
1292 		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1293 		break;
1294 	default:
1295 		netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1296 		break;
1297 	}
1298 }
1299 
1300 static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1301 				    struct udp_tunnel_info *ti)
1302 {
1303 	u16 port_n = ntohs(ti->port);
1304 
1305 	switch (ti->type) {
1306 	case UDP_TUNNEL_TYPE_VXLAN:
1307 		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1308 		break;
1309 	case UDP_TUNNEL_TYPE_GENEVE:
1310 		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1311 		break;
1312 	default:
1313 		break;
1314 	}
1315 }
1316 
1317 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1318 {
1319 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1320 	struct hnae3_handle *h = hns3_get_handle(netdev);
1321 	struct hnae3_knic_private_info *kinfo = &h->kinfo;
1322 	u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1323 	u8 tc = mqprio_qopt->qopt.num_tc;
1324 	u16 mode = mqprio_qopt->mode;
1325 	u8 hw = mqprio_qopt->qopt.hw;
1326 	bool if_running;
1327 	unsigned int i;
1328 	int ret;
1329 
1330 	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1331 	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1332 		return -EOPNOTSUPP;
1333 
1334 	if (tc > HNAE3_MAX_TC)
1335 		return -EINVAL;
1336 
1337 	if (!netdev)
1338 		return -EINVAL;
1339 
1340 	if_running = netif_running(netdev);
1341 	if (if_running) {
1342 		hns3_nic_net_stop(netdev);
1343 		msleep(100);
1344 	}
1345 
1346 	ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1347 		kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1348 	if (ret)
1349 		goto out;
1350 
1351 	if (tc <= 1) {
1352 		netdev_reset_tc(netdev);
1353 	} else {
1354 		ret = netdev_set_num_tc(netdev, tc);
1355 		if (ret)
1356 			goto out;
1357 
1358 		for (i = 0; i < HNAE3_MAX_TC; i++) {
1359 			if (!kinfo->tc_info[i].enable)
1360 				continue;
1361 
1362 			netdev_set_tc_queue(netdev,
1363 					    kinfo->tc_info[i].tc,
1364 					    kinfo->tc_info[i].tqp_count,
1365 					    kinfo->tc_info[i].tqp_offset);
1366 		}
1367 	}
1368 
1369 	ret = hns3_nic_set_real_num_queue(netdev);
1370 
1371 out:
1372 	if (if_running)
1373 		hns3_nic_net_open(netdev);
1374 
1375 	return ret;
1376 }
1377 
1378 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1379 			     void *type_data)
1380 {
1381 	if (type != TC_SETUP_QDISC_MQPRIO)
1382 		return -EOPNOTSUPP;
1383 
1384 	return hns3_setup_tc(dev, type_data);
1385 }
1386 
1387 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1388 				__be16 proto, u16 vid)
1389 {
1390 	struct hnae3_handle *h = hns3_get_handle(netdev);
1391 	int ret = -EIO;
1392 
1393 	if (h->ae_algo->ops->set_vlan_filter)
1394 		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1395 
1396 	return ret;
1397 }
1398 
1399 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1400 				 __be16 proto, u16 vid)
1401 {
1402 	struct hnae3_handle *h = hns3_get_handle(netdev);
1403 	int ret = -EIO;
1404 
1405 	if (h->ae_algo->ops->set_vlan_filter)
1406 		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1407 
1408 	return ret;
1409 }
1410 
1411 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1412 				u8 qos, __be16 vlan_proto)
1413 {
1414 	struct hnae3_handle *h = hns3_get_handle(netdev);
1415 	int ret = -EIO;
1416 
1417 	if (h->ae_algo->ops->set_vf_vlan_filter)
1418 		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1419 						   qos, vlan_proto);
1420 
1421 	return ret;
1422 }
1423 
1424 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1425 {
1426 	struct hnae3_handle *h = hns3_get_handle(netdev);
1427 	bool if_running = netif_running(netdev);
1428 	int ret;
1429 
1430 	if (!h->ae_algo->ops->set_mtu)
1431 		return -EOPNOTSUPP;
1432 
1433 	/* if this was called with netdev up then bring netdevice down */
1434 	if (if_running) {
1435 		(void)hns3_nic_net_stop(netdev);
1436 		msleep(100);
1437 	}
1438 
1439 	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1440 	if (ret) {
1441 		netdev_err(netdev, "failed to change MTU in hardware %d\n",
1442 			   ret);
1443 		return ret;
1444 	}
1445 
1446 	netdev->mtu = new_mtu;
1447 
1448 	/* if the netdev was running earlier, bring it up again */
1449 	if (if_running && hns3_nic_net_open(netdev))
1450 		ret = -EINVAL;
1451 
1452 	return ret;
1453 }
1454 
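/* find the stopped Tx queue that timed out, the same way the stack
 * does, and dump its SW/HW ring pointers; returns false if no such
 * queue is found
 */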
1455 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1456 {
1457 	struct hns3_nic_priv *priv = netdev_priv(ndev);
1458 	struct hns3_enet_ring *tx_ring = NULL;
1459 	int timeout_queue = 0;
1460 	int hw_head, hw_tail;
1461 	int i;
1462 
1463 	/* Find the stopped queue the same way the stack does */
1464 	for (i = 0; i < ndev->real_num_tx_queues; i++) {
1465 		struct netdev_queue *q;
1466 		unsigned long trans_start;
1467 
1468 		q = netdev_get_tx_queue(ndev, i);
1469 		trans_start = q->trans_start;
1470 		if (netif_xmit_stopped(q) &&
1471 		    time_after(jiffies,
1472 			       (trans_start + ndev->watchdog_timeo))) {
1473 			timeout_queue = i;
1474 			break;
1475 		}
1476 	}
1477 
	if (i == ndev->real_num_tx_queues) {
1479 		netdev_info(ndev,
1480 			    "no netdev TX timeout queue found, timeout count: %llu\n",
1481 			    priv->tx_timeout_count);
1482 		return false;
1483 	}
1484 
1485 	tx_ring = priv->ring_data[timeout_queue].ring;
1486 
1487 	hw_head = readl_relaxed(tx_ring->tqp->io_base +
1488 				HNS3_RING_TX_RING_HEAD_REG);
1489 	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1490 				HNS3_RING_TX_RING_TAIL_REG);
1491 	netdev_info(ndev,
1492 		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1493 		    priv->tx_timeout_count,
1494 		    timeout_queue,
1495 		    tx_ring->next_to_use,
1496 		    tx_ring->next_to_clean,
1497 		    hw_head,
1498 		    hw_tail,
1499 		    readl(tx_ring->tqp_vector->mask_addr));
1500 
1501 	return true;
1502 }
1503 
1504 static void hns3_nic_net_timeout(struct net_device *ndev)
1505 {
1506 	struct hns3_nic_priv *priv = netdev_priv(ndev);
1507 	unsigned long last_reset_time = priv->last_reset_time;
1508 	struct hnae3_handle *h = priv->ae_handle;
1509 
1510 	if (!hns3_get_tx_timeo_queue_info(ndev))
1511 		return;
1512 
1513 	priv->tx_timeout_count++;
1514 
	/* This timeout is far enough away from the last one, so if it
	 * times out again, restart escalation from a function (PF) reset.
	 */
1518 	if (time_after(jiffies, (last_reset_time + 20 * HZ)))
1519 		priv->reset_level = HNAE3_FUNC_RESET;
1520 
1521 	/* Don't do any new action before the next timeout */
1522 	else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo)))
1523 		return;
1524 
1525 	priv->last_reset_time = jiffies;
1526 
1527 	if (h->ae_algo->ops->reset_event)
1528 		h->ae_algo->ops->reset_event(h, priv->reset_level);
1529 
1530 	priv->reset_level++;
1531 	if (priv->reset_level > HNAE3_GLOBAL_RESET)
1532 		priv->reset_level = HNAE3_GLOBAL_RESET;
1533 }
1534 
1535 static const struct net_device_ops hns3_nic_netdev_ops = {
1536 	.ndo_open		= hns3_nic_net_open,
1537 	.ndo_stop		= hns3_nic_net_stop,
1538 	.ndo_start_xmit		= hns3_nic_net_xmit,
1539 	.ndo_tx_timeout		= hns3_nic_net_timeout,
1540 	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
1541 	.ndo_change_mtu		= hns3_nic_change_mtu,
1542 	.ndo_set_features	= hns3_nic_set_features,
1543 	.ndo_get_stats64	= hns3_nic_get_stats64,
1544 	.ndo_setup_tc		= hns3_nic_setup_tc,
1545 	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
1546 	.ndo_udp_tunnel_add	= hns3_nic_udp_tunnel_add,
1547 	.ndo_udp_tunnel_del	= hns3_nic_udp_tunnel_del,
1548 	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
1549 	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
1550 	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
1551 };
1552 
1553 /* hns3_probe - Device initialization routine
1554  * @pdev: PCI device information struct
1555  * @ent: entry in hns3_pci_tbl
1556  *
1557  * hns3_probe initializes a PF identified by a pci_dev structure.
1558  * The OS initialization, configuring of the PF private structure,
1559  * and a hardware reset occur.
1560  *
1561  * Returns 0 on success, negative on failure
1562  */
1563 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1564 {
1565 	struct hnae3_ae_dev *ae_dev;
1566 	int ret;
1567 
1568 	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1569 			      GFP_KERNEL);
1570 	if (!ae_dev) {
1571 		ret = -ENOMEM;
1572 		return ret;
1573 	}
1574 
1575 	ae_dev->pdev = pdev;
1576 	ae_dev->flag = ent->driver_data;
1577 	ae_dev->dev_type = HNAE3_DEV_KNIC;
1578 	pci_set_drvdata(pdev, ae_dev);
1579 
1580 	return hnae3_register_ae_dev(ae_dev);
1581 }
1582 
1583 /* hns3_remove - Device removal routine
1584  * @pdev: PCI device information struct
1585  */
1586 static void hns3_remove(struct pci_dev *pdev)
1587 {
1588 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1589 
1590 	hnae3_unregister_ae_dev(ae_dev);
1591 
1592 	devm_kfree(&pdev->dev, ae_dev);
1593 
1594 	pci_set_drvdata(pdev, NULL);
1595 }
1596 
1597 static struct pci_driver hns3_driver = {
1598 	.name     = hns3_driver_name,
1599 	.id_table = hns3_pci_tbl,
1600 	.probe    = hns3_probe,
1601 	.remove   = hns3_remove,
1602 };
1603 
/* set the default features for the hns3 netdev */
1605 static void hns3_set_default_feature(struct net_device *netdev)
1606 {
1607 	struct hnae3_handle *h = hns3_get_handle(netdev);
1608 
1609 	netdev->priv_flags |= IFF_UNICAST_FLT;
1610 
1611 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1612 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1613 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1614 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1615 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
1616 
1617 	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1618 
1619 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1620 
1621 	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1622 		NETIF_F_HW_VLAN_CTAG_FILTER |
1623 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1624 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1625 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1626 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1627 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
1628 
1629 	netdev->vlan_features |=
1630 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1631 		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1632 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1633 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1634 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
1635 
1636 	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1637 		NETIF_F_HW_VLAN_CTAG_TX |
1638 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1639 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1640 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1641 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
1642 
1643 	if (!(h->flags & HNAE3_SUPPORT_VF))
1644 		netdev->hw_features |=
1645 			NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
1646 }
1647 
1648 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1649 			     struct hns3_desc_cb *cb)
1650 {
1651 	unsigned int order = hnae_page_order(ring);
1652 	struct page *p;
1653 
1654 	p = dev_alloc_pages(order);
1655 	if (!p)
1656 		return -ENOMEM;
1657 
1658 	cb->priv = p;
1659 	cb->page_offset = 0;
1660 	cb->reuse_flag = 0;
1661 	cb->buf  = page_address(p);
1662 	cb->length = hnae_page_size(ring);
1663 	cb->type = DESC_TYPE_PAGE;
1664 
1665 	return 0;
1666 }
1667 
1668 static void hns3_free_buffer(struct hns3_enet_ring *ring,
1669 			     struct hns3_desc_cb *cb)
1670 {
1671 	if (cb->type == DESC_TYPE_SKB)
1672 		dev_kfree_skb_any((struct sk_buff *)cb->priv);
1673 	else if (!HNAE3_IS_TX_RING(ring))
1674 		put_page((struct page *)cb->priv);
1675 	memset(cb, 0, sizeof(*cb));
1676 }
1677 
1678 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1679 {
1680 	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1681 			       cb->length, ring_to_dma_dir(ring));
1682 
1683 	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1684 		return -EIO;
1685 
1686 	return 0;
1687 }
1688 
1689 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1690 			      struct hns3_desc_cb *cb)
1691 {
1692 	if (cb->type == DESC_TYPE_SKB)
1693 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1694 				 ring_to_dma_dir(ring));
1695 	else
1696 		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1697 			       ring_to_dma_dir(ring));
1698 }
1699 
1700 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1701 {
1702 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1703 	ring->desc[i].addr = 0;
1704 }
1705 
1706 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1707 {
1708 	struct hns3_desc_cb *cb = &ring->desc_cb[i];
1709 
1710 	if (!ring->desc_cb[i].dma)
1711 		return;
1712 
1713 	hns3_buffer_detach(ring, i);
1714 	hns3_free_buffer(ring, cb);
1715 }
1716 
1717 static void hns3_free_buffers(struct hns3_enet_ring *ring)
1718 {
1719 	int i;
1720 
1721 	for (i = 0; i < ring->desc_num; i++)
1722 		hns3_free_buffer_detach(ring, i);
1723 }
1724 
1725 /* free desc along with its attached buffer */
1726 static void hns3_free_desc(struct hns3_enet_ring *ring)
1727 {
1728 	hns3_free_buffers(ring);
1729 
1730 	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1731 			 ring->desc_num * sizeof(ring->desc[0]),
1732 			 DMA_BIDIRECTIONAL);
1733 	ring->desc_dma_addr = 0;
1734 	kfree(ring->desc);
1735 	ring->desc = NULL;
1736 }
1737 
1738 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1739 {
1740 	int size = ring->desc_num * sizeof(ring->desc[0]);
1741 
1742 	ring->desc = kzalloc(size, GFP_KERNEL);
1743 	if (!ring->desc)
1744 		return -ENOMEM;
1745 
1746 	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1747 					     size, DMA_BIDIRECTIONAL);
1748 	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1749 		ring->desc_dma_addr = 0;
1750 		kfree(ring->desc);
1751 		ring->desc = NULL;
1752 		return -ENOMEM;
1753 	}
1754 
1755 	return 0;
1756 }
1757 
1758 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1759 				   struct hns3_desc_cb *cb)
1760 {
1761 	int ret;
1762 
1763 	ret = hns3_alloc_buffer(ring, cb);
1764 	if (ret)
1765 		goto out;
1766 
1767 	ret = hns3_map_buffer(ring, cb);
1768 	if (ret)
1769 		goto out_with_buf;
1770 
1771 	return 0;
1772 
1773 out_with_buf:
1774 	hns3_free_buffer(ring, cb);
1775 out:
1776 	return ret;
1777 }
1778 
1779 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1780 {
1781 	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1782 
1783 	if (ret)
1784 		return ret;
1785 
1786 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1787 
1788 	return 0;
1789 }
1790 
/* allocate memory for the raw packet buffers and map them for DMA */
1792 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1793 {
1794 	int i, j, ret;
1795 
1796 	for (i = 0; i < ring->desc_num; i++) {
1797 		ret = hns3_alloc_buffer_attach(ring, i);
1798 		if (ret)
1799 			goto out_buffer_fail;
1800 	}
1801 
1802 	return 0;
1803 
1804 out_buffer_fail:
1805 	for (j = i - 1; j >= 0; j--)
1806 		hns3_free_buffer_detach(ring, j);
1807 	return ret;
1808 }
1809 
/* detach an in-use buffer and replace it with a reserved one */
1811 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1812 				struct hns3_desc_cb *res_cb)
1813 {
1814 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1815 	ring->desc_cb[i] = *res_cb;
1816 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1817 }
1818 
1819 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1820 {
1821 	ring->desc_cb[i].reuse_flag = 0;
1822 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1823 		+ ring->desc_cb[i].page_offset);
1824 }
1825 
1826 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1827 				      int *pkts)
1828 {
1829 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1830 
1831 	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1832 	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hns3_free_buffer_detach() */
1834 	hns3_free_buffer_detach(ring, ring->next_to_clean);
1835 
1836 	ring_ptr_move_fw(ring, next_to_clean);
1837 }
1838 
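/* a head value reported by hardware is only valid if it lies within
 * the region of the ring that has been filled but not yet cleaned
 */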
1839 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1840 {
1841 	int u = ring->next_to_use;
1842 	int c = ring->next_to_clean;
1843 
1844 	if (unlikely(h > ring->desc_num))
1845 		return 0;
1846 
1847 	return u > c ? (h > c && h <= u) : (h > c || h <= u);
1848 }
1849 
1850 bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1851 {
1852 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1853 	struct netdev_queue *dev_queue;
1854 	int bytes, pkts;
1855 	int head;
1856 
1857 	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1858 	rmb(); /* Make sure head is ready before touch any data */
1859 
1860 	if (is_ring_empty(ring) || head == ring->next_to_clean)
1861 		return true; /* no data to poll */
1862 
1863 	if (!is_valid_clean_head(ring, head)) {
1864 		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1865 			   ring->next_to_use, ring->next_to_clean);
1866 
1867 		u64_stats_update_begin(&ring->syncp);
1868 		ring->stats.io_err_cnt++;
1869 		u64_stats_update_end(&ring->syncp);
1870 		return true;
1871 	}
1872 
1873 	bytes = 0;
1874 	pkts = 0;
1875 	while (head != ring->next_to_clean && budget) {
1876 		hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1877 		/* Issue prefetch for next Tx descriptor */
1878 		prefetch(&ring->desc_cb[ring->next_to_clean]);
1879 		budget--;
1880 	}
1881 
1882 	ring->tqp_vector->tx_group.total_bytes += bytes;
1883 	ring->tqp_vector->tx_group.total_packets += pkts;
1884 
1885 	u64_stats_update_begin(&ring->syncp);
1886 	ring->stats.tx_bytes += bytes;
1887 	ring->stats.tx_pkts += pkts;
1888 	u64_stats_update_end(&ring->syncp);
1889 
1890 	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1891 	netdev_tx_completed_queue(dev_queue, pkts, bytes);
1892 
1893 	if (unlikely(pkts && netif_carrier_ok(netdev) &&
1894 		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1895 		/* Make sure that anybody stopping the queue after this
1896 		 * sees the new next_to_clean.
1897 		 */
1898 		smp_mb();
1899 		if (netif_tx_queue_stopped(dev_queue)) {
1900 			netif_tx_wake_queue(dev_queue);
1901 			ring->stats.restart_queue++;
1902 		}
1903 	}
1904 
1905 	return !!budget;
1906 }
1907 
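/* Return the number of descriptors that have been cleaned but not yet given
 * back to hardware, i.e. the number the driver may refill.
 */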
1908 static int hns3_desc_unused(struct hns3_enet_ring *ring)
1909 {
1910 	int ntc = ring->next_to_clean;
1911 	int ntu = ring->next_to_use;
1912 
1913 	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1914 }
1915 
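/* Refill @cleaned_count RX descriptors starting at next_to_use. Buffers
 * whose pages can be recycled are reused in place, otherwise a fresh buffer
 * is allocated and mapped. The number of refilled descriptors is then
 * written to the RX ring head register to notify hardware.
 */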
1916 static void
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
1918 {
1919 	struct hns3_desc_cb *desc_cb;
1920 	struct hns3_desc_cb res_cbs;
1921 	int i, ret;
1922 
	for (i = 0; i < cleaned_count; i++) {
1924 		desc_cb = &ring->desc_cb[ring->next_to_use];
1925 		if (desc_cb->reuse_flag) {
1926 			u64_stats_update_begin(&ring->syncp);
1927 			ring->stats.reuse_pg_cnt++;
1928 			u64_stats_update_end(&ring->syncp);
1929 
1930 			hns3_reuse_buffer(ring, ring->next_to_use);
1931 		} else {
1932 			ret = hns3_reserve_buffer_map(ring, &res_cbs);
1933 			if (ret) {
1934 				u64_stats_update_begin(&ring->syncp);
1935 				ring->stats.sw_err_cnt++;
1936 				u64_stats_update_end(&ring->syncp);
1937 
1938 				netdev_err(ring->tqp->handle->kinfo.netdev,
1939 					   "hnae reserve buffer map failed.\n");
1940 				break;
1941 			}
1942 			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1943 		}
1944 
1945 		ring_ptr_move_fw(ring, next_to_use);
1946 	}
1947 
	wmb(); /* Ensure descriptor writes complete before notifying hardware */
1949 	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1950 }
1951 
/* hns3_nic_get_headlen - determine size of the header for LRO/GRO
 * @data: pointer to the start of the headers
 * @flag: l234info bits of the RX descriptor describing the packet
 * @max_size: total length of the section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 * motivation for doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 */
1962 static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1963 					 unsigned int max_size)
1964 {
1965 	unsigned char *network;
1966 	u8 hlen;
1967 
1968 	/* This should never happen, but better safe than sorry */
1969 	if (max_size < ETH_HLEN)
1970 		return max_size;
1971 
1972 	/* Initialize network frame pointer */
1973 	network = data;
1974 
1975 	/* Set first protocol and move network header forward */
1976 	network += ETH_HLEN;
1977 
1978 	/* Handle any vlan tag if present */
1979 	if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1980 		== HNS3_RX_FLAG_VLAN_PRESENT) {
1981 		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1982 			return max_size;
1983 
1984 		network += VLAN_HLEN;
1985 	}
1986 
1987 	/* Handle L3 protocols */
1988 	if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1989 		== HNS3_RX_FLAG_L3ID_IPV4) {
1990 		if ((typeof(max_size))(network - data) >
1991 		    (max_size - sizeof(struct iphdr)))
1992 			return max_size;
1993 
1994 		/* Access ihl as a u8 to avoid unaligned access on ia64 */
1995 		hlen = (network[0] & 0x0F) << 2;
1996 
1997 		/* Verify hlen meets minimum size requirements */
1998 		if (hlen < sizeof(struct iphdr))
1999 			return network - data;
2000 
2001 		/* Record next protocol if header is present */
2002 	} else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
2003 		== HNS3_RX_FLAG_L3ID_IPV6) {
2004 		if ((typeof(max_size))(network - data) >
2005 		    (max_size - sizeof(struct ipv6hdr)))
2006 			return max_size;
2007 
2008 		/* Record next protocol */
2009 		hlen = sizeof(struct ipv6hdr);
2010 	} else {
2011 		return network - data;
2012 	}
2013 
2014 	/* Relocate pointer to start of L4 header */
2015 	network += hlen;
2016 
2017 	/* Finally sort out TCP/UDP */
2018 	if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
2019 		== HNS3_RX_FLAG_L4ID_TCP) {
2020 		if ((typeof(max_size))(network - data) >
2021 		    (max_size - sizeof(struct tcphdr)))
2022 			return max_size;
2023 
2024 		/* Access doff as a u8 to avoid unaligned access on ia64 */
2025 		hlen = (network[12] & 0xF0) >> 2;
2026 
2027 		/* Verify hlen meets minimum size requirements */
2028 		if (hlen < sizeof(struct tcphdr))
2029 			return network - data;
2030 
2031 		network += hlen;
2032 	} else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
2033 		== HNS3_RX_FLAG_L4ID_UDP) {
2034 		if ((typeof(max_size))(network - data) >
2035 		    (max_size - sizeof(struct udphdr)))
2036 			return max_size;
2037 
2038 		network += sizeof(struct udphdr);
2039 	}
2040 
	/* If everything has gone correctly, network should now point to the
	 * data section of the packet, which is the end of the headers. If
	 * not, it points at the end of the last recognized header.
	 */
2046 	if ((typeof(max_size))(network - data) < max_size)
2047 		return network - data;
2048 	else
2049 		return max_size;
2050 }
2051 
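/* Attach the current RX buffer to @skb as a page fragment and decide whether
 * its page can be recycled. With 2K buffers and sub-8K pages, the page is
 * split into two halves and page_offset is flipped between them; otherwise
 * page_offset advances by the aligned buffer size until the page is used up.
 * Pages belonging to a remote NUMA node are never reused.
 */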
2052 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2053 				struct hns3_enet_ring *ring, int pull_len,
2054 				struct hns3_desc_cb *desc_cb)
2055 {
2056 	struct hns3_desc *desc;
2057 	int truesize, size;
2058 	int last_offset;
2059 	bool twobufs;
2060 
2061 	twobufs = ((PAGE_SIZE < 8192) &&
2062 		hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2063 
2064 	desc = &ring->desc[ring->next_to_clean];
2065 	size = le16_to_cpu(desc->rx.size);
2066 
2067 	if (twobufs) {
2068 		truesize = hnae_buf_size(ring);
2069 	} else {
2070 		truesize = ALIGN(size, L1_CACHE_BYTES);
2071 		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
2072 	}
2073 
2074 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2075 			size - pull_len, truesize - pull_len);
2076 
	/* Avoid reusing remote pages; reuse_flag defaults to 0 (no reuse) */
2078 	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2079 		return;
2080 
2081 	if (twobufs) {
		/* If we are the only owner of the page we can reuse it */
2083 		if (likely(page_count(desc_cb->priv) == 1)) {
2084 			/* Flip page offset to other buffer */
2085 			desc_cb->page_offset ^= truesize;
2086 
2087 			desc_cb->reuse_flag = 1;
			/* Bump ref count on the page before it is handed out */
2089 			get_page(desc_cb->priv);
2090 		}
2091 		return;
2092 	}
2093 
2094 	/* Move offset up to the next cache line */
2095 	desc_cb->page_offset += truesize;
2096 
2097 	if (desc_cb->page_offset <= last_offset) {
2098 		desc_cb->reuse_flag = 1;
		/* Bump ref count on the page before it is handed out */
2100 		get_page(desc_cb->priv);
2101 	}
2102 }
2103 
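/* Set the checksum state of @skb from the RX descriptor: leave it as
 * CHECKSUM_NONE when RX checksum offload is disabled, when hardware did not
 * verify the packet, or when an L3/L4 error is flagged; otherwise mark it
 * CHECKSUM_UNNECESSARY for the supported L3/L4/tunnel combinations.
 */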
2104 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2105 			     struct hns3_desc *desc)
2106 {
2107 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2108 	int l3_type, l4_type;
2109 	u32 bd_base_info;
2110 	int ol4_type;
2111 	u32 l234info;
2112 
2113 	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2114 	l234info = le32_to_cpu(desc->rx.l234_info);
2115 
2116 	skb->ip_summed = CHECKSUM_NONE;
2117 
2118 	skb_checksum_none_assert(skb);
2119 
2120 	if (!(netdev->features & NETIF_F_RXCSUM))
2121 		return;
2122 
2123 	/* check if hardware has done checksum */
2124 	if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2125 		return;
2126 
2127 	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
2128 		     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
2129 		     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2130 		     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2131 		netdev_err(netdev, "L3/L4 error pkt\n");
2132 		u64_stats_update_begin(&ring->syncp);
2133 		ring->stats.l3l4_csum_err++;
2134 		u64_stats_update_end(&ring->syncp);
2135 
2136 		return;
2137 	}
2138 
2139 	l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
2140 				 HNS3_RXD_L3ID_S);
2141 	l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
2142 				 HNS3_RXD_L4ID_S);
2143 
2144 	ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
2145 	switch (ol4_type) {
2146 	case HNS3_OL4_TYPE_MAC_IN_UDP:
2147 	case HNS3_OL4_TYPE_NVGRE:
2148 		skb->csum_level = 1;
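		/* fall through */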
2149 	case HNS3_OL4_TYPE_NO_TUN:
2150 		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2151 		if (l3_type == HNS3_L3_TYPE_IPV4 ||
2152 		    (l3_type == HNS3_L3_TYPE_IPV6 &&
2153 		     (l4_type == HNS3_L4_TYPE_UDP ||
2154 		      l4_type == HNS3_L4_TYPE_TCP ||
2155 		      l4_type == HNS3_L4_TYPE_SCTP)))
2156 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2157 		break;
2158 	}
2159 }
2160 
2161 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2162 {
2163 	napi_gro_receive(&ring->tqp_vector->napi, skb);
2164 }
2165 
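/* Process one received packet starting at next_to_clean. Short packets are
 * copied entirely into a freshly allocated skb; for longer packets only the
 * headers are copied and the remaining buffer descriptors are attached as
 * page fragments until the HNS3_RXD_FE_B bit marks the final BD. On success
 * *out_skb and *out_bnum hold the skb and the number of BDs consumed.
 */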
2166 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2167 			     struct sk_buff **out_skb, int *out_bnum)
2168 {
2169 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2170 	struct hns3_desc_cb *desc_cb;
2171 	struct hns3_desc *desc;
2172 	struct sk_buff *skb;
2173 	unsigned char *va;
2174 	u32 bd_base_info;
2175 	int pull_len;
2176 	u32 l234info;
2177 	int length;
2178 	int bnum;
2179 
2180 	desc = &ring->desc[ring->next_to_clean];
2181 	desc_cb = &ring->desc_cb[ring->next_to_clean];
2182 
2183 	prefetch(desc);
2184 
2185 	length = le16_to_cpu(desc->rx.pkt_len);
2186 	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2187 	l234info = le32_to_cpu(desc->rx.l234_info);
2188 
2189 	/* Check valid BD */
2190 	if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
2191 		return -EFAULT;
2192 
2193 	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2194 
	/* Prefetch the first cache line of the first page.
	 * The idea is to cache the first few bytes of the packet header.
	 * With a 64B L1 cache line we need to prefetch twice to cover 128B,
	 * while systems with 128B L1 cache lines only need a single prefetch
	 * to pull in the relevant part of the header.
	 */
2202 	prefetch(va);
2203 #if L1_CACHE_BYTES < 128
2204 	prefetch(va + L1_CACHE_BYTES);
2205 #endif
2206 
2207 	skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2208 					HNS3_RX_HEAD_SIZE);
2209 	if (unlikely(!skb)) {
2210 		netdev_err(netdev, "alloc rx skb fail\n");
2211 
2212 		u64_stats_update_begin(&ring->syncp);
2213 		ring->stats.sw_err_cnt++;
2214 		u64_stats_update_end(&ring->syncp);
2215 
2216 		return -ENOMEM;
2217 	}
2218 
2219 	prefetchw(skb->data);
2220 
	/* Based on the hardware strategy, the offloaded VLAN tag is stored
	 * in ot_vlan_tag for a double-tagged packet and in vlan_tag for a
	 * single-tagged packet.
	 */
2225 	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2226 		u16 vlan_tag;
2227 
2228 		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2229 		if (!(vlan_tag & VLAN_VID_MASK))
2230 			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2231 		if (vlan_tag & VLAN_VID_MASK)
2232 			__vlan_hwaccel_put_tag(skb,
2233 					       htons(ETH_P_8021Q),
2234 					       vlan_tag);
2235 	}
2236 
2237 	bnum = 1;
2238 	if (length <= HNS3_RX_HEAD_SIZE) {
2239 		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2240 
2241 		/* We can reuse buffer as-is, just make sure it is local */
2242 		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2243 			desc_cb->reuse_flag = 1;
2244 		else /* This page cannot be reused so discard it */
2245 			put_page(desc_cb->priv);
2246 
2247 		ring_ptr_move_fw(ring, next_to_clean);
2248 	} else {
2249 		u64_stats_update_begin(&ring->syncp);
2250 		ring->stats.seg_pkt_cnt++;
2251 		u64_stats_update_end(&ring->syncp);
2252 
2253 		pull_len = hns3_nic_get_headlen(va, l234info,
2254 						HNS3_RX_HEAD_SIZE);
2255 		memcpy(__skb_put(skb, pull_len), va,
2256 		       ALIGN(pull_len, sizeof(long)));
2257 
2258 		hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2259 		ring_ptr_move_fw(ring, next_to_clean);
2260 
2261 		while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2262 			desc = &ring->desc[ring->next_to_clean];
2263 			desc_cb = &ring->desc_cb[ring->next_to_clean];
2264 			bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2265 			hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2266 			ring_ptr_move_fw(ring, next_to_clean);
2267 			bnum++;
2268 		}
2269 	}
2270 
2271 	*out_bnum = bnum;
2272 
2273 	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2274 		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2275 			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
2276 		u64_stats_update_begin(&ring->syncp);
2277 		ring->stats.non_vld_descs++;
2278 		u64_stats_update_end(&ring->syncp);
2279 
2280 		dev_kfree_skb_any(skb);
2281 		return -EINVAL;
2282 	}
2283 
2284 	if (unlikely((!desc->rx.pkt_len) ||
2285 		     hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2286 		netdev_err(netdev, "truncated pkt\n");
2287 		u64_stats_update_begin(&ring->syncp);
2288 		ring->stats.err_pkt_len++;
2289 		u64_stats_update_end(&ring->syncp);
2290 
2291 		dev_kfree_skb_any(skb);
2292 		return -EFAULT;
2293 	}
2294 
2295 	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2296 		netdev_err(netdev, "L2 error pkt\n");
2297 		u64_stats_update_begin(&ring->syncp);
2298 		ring->stats.l2_err++;
2299 		u64_stats_update_end(&ring->syncp);
2300 
2301 		dev_kfree_skb_any(skb);
2302 		return -EFAULT;
2303 	}
2304 
2305 	u64_stats_update_begin(&ring->syncp);
2306 	ring->stats.rx_pkts++;
2307 	ring->stats.rx_bytes += skb->len;
2308 	u64_stats_update_end(&ring->syncp);
2309 
2310 	ring->tqp_vector->rx_group.total_bytes += skb->len;
2311 
2312 	hns3_rx_checksum(ring, skb, desc);
2313 	return 0;
2314 }
2315 
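/* Poll up to @budget packets from the RX ring, handing each completed skb to
 * @rx_fn. Consumed descriptors are refilled in batches of
 * RCB_NOF_ALLOC_RX_BUFF_ONCE, and any remainder is refilled before returning.
 * Returns the number of packets received.
 */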
2316 int hns3_clean_rx_ring(
2317 		struct hns3_enet_ring *ring, int budget,
2318 		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2319 {
2320 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2321 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2322 	int recv_pkts, recv_bds, clean_count, err;
2323 	int unused_count = hns3_desc_unused(ring);
2324 	struct sk_buff *skb = NULL;
2325 	int num, bnum = 0;
2326 
2327 	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num has taken effect before other data is touched */
2329 
2330 	recv_pkts = 0, recv_bds = 0, clean_count = 0;
2331 	num -= unused_count;
2332 
2333 	while (recv_pkts < budget && recv_bds < num) {
2334 		/* Reuse or realloc buffers */
2335 		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2336 			hns3_nic_alloc_rx_buffers(ring,
2337 						  clean_count + unused_count);
2338 			clean_count = 0;
2339 			unused_count = hns3_desc_unused(ring);
2340 		}
2341 
2342 		/* Poll one pkt */
2343 		err = hns3_handle_rx_bd(ring, &skb, &bnum);
2344 		if (unlikely(!skb)) /* This fault cannot be repaired */
2345 			goto out;
2346 
2347 		recv_bds += bnum;
2348 		clean_count += bnum;
		if (unlikely(err)) { /* Skip this erroneous packet */
2350 			recv_pkts++;
2351 			continue;
2352 		}
2353 
		/* Hand the packet up to the IP stack */
2355 		skb->protocol = eth_type_trans(skb, netdev);
2356 		rx_fn(ring, skb);
2357 
2358 		recv_pkts++;
2359 	}
2360 
2361 out:
	/* Refill any outstanding buffers before returning */
2363 	if (clean_count + unused_count > 0)
2364 		hns3_nic_alloc_rx_buffers(ring,
2365 					  clean_count + unused_count);
2366 
2367 	return recv_pkts;
2368 }
2369 
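/* Recompute the interrupt coalescing (GL) value for @ring_group from the
 * bytes and packets seen since the last update, moving the flow level up or
 * down one step at a time. Returns true if the GL value changed and the
 * hardware setting needs to be rewritten.
 */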
2370 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2371 {
2372 #define HNS3_RX_ULTRA_PACKET_RATE 40000
2373 	enum hns3_flow_level_range new_flow_level;
2374 	struct hns3_enet_tqp_vector *tqp_vector;
2375 	int packets_per_secs;
2376 	int bytes_per_usecs;
2377 	u16 new_int_gl;
2378 	int usecs;
2379 
2380 	if (!ring_group->int_gl)
2381 		return false;
2382 
2383 	if (ring_group->total_packets == 0) {
2384 		ring_group->int_gl = HNS3_INT_GL_50K;
2385 		ring_group->flow_level = HNS3_FLOW_LOW;
2386 		return true;
2387 	}
2388 
	/* Simple throttle rate management
	 * 0-10 MB/s    low    (50000 ints/s)
	 * 10-20 MB/s   middle (20000 ints/s)
	 * 20-1249 MB/s high   (18000 ints/s)
	 * > 40000 pps  ultra  (8000 ints/s)
	 */
2395 	new_flow_level = ring_group->flow_level;
2396 	new_int_gl = ring_group->int_gl;
2397 	tqp_vector = ring_group->ring->tqp_vector;
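	/* The GL value appears to be in units of 2 usecs, hence the shift */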
2398 	usecs = (ring_group->int_gl << 1);
2399 	bytes_per_usecs = ring_group->total_bytes / usecs;
	/* 1000000 microseconds per second */
2401 	packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2402 
2403 	switch (new_flow_level) {
2404 	case HNS3_FLOW_LOW:
2405 		if (bytes_per_usecs > 10)
2406 			new_flow_level = HNS3_FLOW_MID;
2407 		break;
2408 	case HNS3_FLOW_MID:
2409 		if (bytes_per_usecs > 20)
2410 			new_flow_level = HNS3_FLOW_HIGH;
2411 		else if (bytes_per_usecs <= 10)
2412 			new_flow_level = HNS3_FLOW_LOW;
2413 		break;
2414 	case HNS3_FLOW_HIGH:
2415 	case HNS3_FLOW_ULTRA:
2416 	default:
2417 		if (bytes_per_usecs <= 20)
2418 			new_flow_level = HNS3_FLOW_MID;
2419 		break;
2420 	}
2421 
2422 	if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2423 	    (&tqp_vector->rx_group == ring_group))
2424 		new_flow_level = HNS3_FLOW_ULTRA;
2425 
2426 	switch (new_flow_level) {
2427 	case HNS3_FLOW_LOW:
2428 		new_int_gl = HNS3_INT_GL_50K;
2429 		break;
2430 	case HNS3_FLOW_MID:
2431 		new_int_gl = HNS3_INT_GL_20K;
2432 		break;
2433 	case HNS3_FLOW_HIGH:
2434 		new_int_gl = HNS3_INT_GL_18K;
2435 		break;
2436 	case HNS3_FLOW_ULTRA:
2437 		new_int_gl = HNS3_INT_GL_8K;
2438 		break;
2439 	default:
2440 		break;
2441 	}
2442 
2443 	ring_group->total_bytes = 0;
2444 	ring_group->total_packets = 0;
2445 	ring_group->flow_level = new_flow_level;
2446 	if (new_int_gl != ring_group->int_gl) {
2447 		ring_group->int_gl = new_int_gl;
2448 		return true;
2449 	}
2450 	return false;
2451 }
2452 
2453 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2454 {
2455 	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2456 	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2457 	bool rx_update, tx_update;
2458 
2459 	if (rx_group->gl_adapt_enable) {
2460 		rx_update = hns3_get_new_int_gl(rx_group);
2461 		if (rx_update)
2462 			hns3_set_vector_coalesce_rx_gl(tqp_vector,
2463 						       rx_group->int_gl);
2464 	}
2465 
2466 	if (tx_group->gl_adapt_enable) {
2467 		tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
2468 		if (tx_update)
2469 			hns3_set_vector_coalesce_tx_gl(tqp_vector,
2470 						       tx_group->int_gl);
2471 	}
2472 }
2473 
2474 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2475 {
2476 	struct hns3_enet_ring *ring;
2477 	int rx_pkt_total = 0;
2478 
2479 	struct hns3_enet_tqp_vector *tqp_vector =
2480 		container_of(napi, struct hns3_enet_tqp_vector, napi);
2481 	bool clean_complete = true;
2482 	int rx_budget;
2483 
2484 	/* Since the actual Tx work is minimal, we can give the Tx a larger
2485 	 * budget and be more aggressive about cleaning up the Tx descriptors.
2486 	 */
2487 	hns3_for_each_ring(ring, tqp_vector->tx_group) {
2488 		if (!hns3_clean_tx_ring(ring, budget))
2489 			clean_complete = false;
2490 	}
2491 
	/* make sure the rx ring budget is not smaller than 1 */
2493 	rx_budget = max(budget / tqp_vector->num_tqps, 1);
2494 
2495 	hns3_for_each_ring(ring, tqp_vector->rx_group) {
2496 		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2497 						    hns3_rx_skb);
2498 
2499 		if (rx_cleaned >= rx_budget)
2500 			clean_complete = false;
2501 
2502 		rx_pkt_total += rx_cleaned;
2503 	}
2504 
2505 	tqp_vector->rx_group.total_packets += rx_pkt_total;
2506 
2507 	if (!clean_complete)
2508 		return budget;
2509 
2510 	napi_complete(napi);
2511 	hns3_update_new_int_gl(tqp_vector);
2512 	hns3_mask_vector_irq(tqp_vector, 1);
2513 
2514 	return rx_pkt_total;
2515 }
2516 
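/* Build the ring chain for @tqp_vector: the head node describes the first TX
 * ring (or the first RX ring when the vector has no TX rings) and further
 * nodes, allocated with devm_kzalloc(), are linked for the remaining TX rings
 * followed by all RX rings. The chain is handed to the AE layer to map the
 * rings onto the vector's interrupt.
 */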
2517 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2518 				      struct hnae3_ring_chain_node *head)
2519 {
2520 	struct pci_dev *pdev = tqp_vector->handle->pdev;
2521 	struct hnae3_ring_chain_node *cur_chain = head;
2522 	struct hnae3_ring_chain_node *chain;
2523 	struct hns3_enet_ring *tx_ring;
2524 	struct hns3_enet_ring *rx_ring;
2525 
2526 	tx_ring = tqp_vector->tx_group.ring;
2527 	if (tx_ring) {
2528 		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2529 		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2530 			     HNAE3_RING_TYPE_TX);
2531 		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2532 			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
2533 
2534 		cur_chain->next = NULL;
2535 
2536 		while (tx_ring->next) {
2537 			tx_ring = tx_ring->next;
2538 
2539 			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2540 					     GFP_KERNEL);
2541 			if (!chain)
2542 				return -ENOMEM;
2543 
2544 			cur_chain->next = chain;
2545 			chain->tqp_index = tx_ring->tqp->tqp_index;
2546 			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2547 				     HNAE3_RING_TYPE_TX);
2548 			hnae_set_field(chain->int_gl_idx,
2549 				       HNAE3_RING_GL_IDX_M,
2550 				       HNAE3_RING_GL_IDX_S,
2551 				       HNAE3_RING_GL_TX);
2552 
2553 			cur_chain = chain;
2554 		}
2555 	}
2556 
2557 	rx_ring = tqp_vector->rx_group.ring;
2558 	if (!tx_ring && rx_ring) {
2559 		cur_chain->next = NULL;
2560 		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2561 		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2562 			     HNAE3_RING_TYPE_RX);
2563 		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2564 			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2565 
2566 		rx_ring = rx_ring->next;
2567 	}
2568 
2569 	while (rx_ring) {
2570 		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2571 		if (!chain)
2572 			return -ENOMEM;
2573 
2574 		cur_chain->next = chain;
2575 		chain->tqp_index = rx_ring->tqp->tqp_index;
2576 		hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2577 			     HNAE3_RING_TYPE_RX);
2578 		hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2579 			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2580 
2581 		cur_chain = chain;
2582 
2583 		rx_ring = rx_ring->next;
2584 	}
2585 
2586 	return 0;
2587 }
2588 
2589 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2590 					struct hnae3_ring_chain_node *head)
2591 {
2592 	struct pci_dev *pdev = tqp_vector->handle->pdev;
2593 	struct hnae3_ring_chain_node *chain_tmp, *chain;
2594 
2595 	chain = head->next;
2596 
2597 	while (chain) {
2598 		chain_tmp = chain->next;
2599 		devm_kfree(&pdev->dev, chain);
2600 		chain = chain_tmp;
2601 	}
2602 }
2603 
2604 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2605 				   struct hns3_enet_ring *ring)
2606 {
2607 	ring->next = group->ring;
2608 	group->ring = ring;
2609 
2610 	group->count++;
2611 }
2612 
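/* Distribute the TX/RX ring pairs across the allocated interrupt vectors in
 * round-robin order, initialize each vector's coalescing parameters, map its
 * ring chain in the AE layer and register its NAPI context.
 */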
2613 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2614 {
2615 	struct hnae3_ring_chain_node vector_ring_chain;
2616 	struct hnae3_handle *h = priv->ae_handle;
2617 	struct hns3_enet_tqp_vector *tqp_vector;
2618 	struct hnae3_vector_info *vector;
2619 	struct pci_dev *pdev = h->pdev;
2620 	u16 tqp_num = h->kinfo.num_tqps;
2621 	u16 vector_num;
2622 	int ret = 0;
2623 	u16 i;
2624 
	/* The RSS size, the number of online CPUs and vector_num should match.
	 * 2P/4P topologies may need special handling later.
	 */
2627 	vector_num = min_t(u16, num_online_cpus(), tqp_num);
2628 	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2629 			      GFP_KERNEL);
2630 	if (!vector)
2631 		return -ENOMEM;
2632 
2633 	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2634 
2635 	priv->vector_num = vector_num;
2636 	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2637 		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2638 			     GFP_KERNEL);
2639 	if (!priv->tqp_vector)
2640 		return -ENOMEM;
2641 
2642 	for (i = 0; i < tqp_num; i++) {
2643 		u16 vector_i = i % vector_num;
2644 
2645 		tqp_vector = &priv->tqp_vector[vector_i];
2646 
2647 		hns3_add_ring_to_group(&tqp_vector->tx_group,
2648 				       priv->ring_data[i].ring);
2649 
2650 		hns3_add_ring_to_group(&tqp_vector->rx_group,
2651 				       priv->ring_data[i + tqp_num].ring);
2652 
2653 		tqp_vector->idx = vector_i;
2654 		tqp_vector->mask_addr = vector[vector_i].io_addr;
2655 		tqp_vector->vector_irq = vector[vector_i].vector;
2656 		tqp_vector->num_tqps++;
2657 
2658 		priv->ring_data[i].ring->tqp_vector = tqp_vector;
2659 		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2660 	}
2661 
2662 	for (i = 0; i < vector_num; i++) {
2663 		tqp_vector = &priv->tqp_vector[i];
2664 
2665 		tqp_vector->rx_group.total_bytes = 0;
2666 		tqp_vector->rx_group.total_packets = 0;
2667 		tqp_vector->tx_group.total_bytes = 0;
2668 		tqp_vector->tx_group.total_packets = 0;
2669 		hns3_vector_gl_rl_init(tqp_vector, priv);
2670 		tqp_vector->handle = h;
2671 
2672 		ret = hns3_get_vector_ring_chain(tqp_vector,
2673 						 &vector_ring_chain);
2674 		if (ret)
2675 			goto out;
2676 
2677 		ret = h->ae_algo->ops->map_ring_to_vector(h,
2678 			tqp_vector->vector_irq, &vector_ring_chain);
2679 		if (ret)
2680 			goto out;
2681 
2682 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2683 
2684 		netif_napi_add(priv->netdev, &tqp_vector->napi,
2685 			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2686 	}
2687 
2688 out:
2689 	devm_kfree(&pdev->dev, vector);
2690 	return ret;
2691 }
2692 
2693 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2694 {
2695 	struct hnae3_ring_chain_node vector_ring_chain;
2696 	struct hnae3_handle *h = priv->ae_handle;
2697 	struct hns3_enet_tqp_vector *tqp_vector;
2698 	struct pci_dev *pdev = h->pdev;
2699 	int i, ret;
2700 
2701 	for (i = 0; i < priv->vector_num; i++) {
2702 		tqp_vector = &priv->tqp_vector[i];
2703 
2704 		ret = hns3_get_vector_ring_chain(tqp_vector,
2705 						 &vector_ring_chain);
2706 		if (ret)
2707 			return ret;
2708 
2709 		ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2710 			tqp_vector->vector_irq, &vector_ring_chain);
2711 		if (ret)
2712 			return ret;
2713 
2714 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2715 
2716 		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
			(void)irq_set_affinity_hint(
				priv->tqp_vector[i].vector_irq, NULL);
2720 			free_irq(priv->tqp_vector[i].vector_irq,
2721 				 &priv->tqp_vector[i]);
2722 		}
2723 
2724 		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2725 
2726 		netif_napi_del(&priv->tqp_vector[i].napi);
2727 	}
2728 
2729 	devm_kfree(&pdev->dev, priv->tqp_vector);
2730 
2731 	return 0;
2732 }
2733 
2734 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2735 			     int ring_type)
2736 {
2737 	struct hns3_nic_ring_data *ring_data = priv->ring_data;
2738 	int queue_num = priv->ae_handle->kinfo.num_tqps;
2739 	struct pci_dev *pdev = priv->ae_handle->pdev;
2740 	struct hns3_enet_ring *ring;
2741 
2742 	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2743 	if (!ring)
2744 		return -ENOMEM;
2745 
2746 	if (ring_type == HNAE3_RING_TYPE_TX) {
2747 		ring_data[q->tqp_index].ring = ring;
2748 		ring_data[q->tqp_index].queue_index = q->tqp_index;
2749 		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2750 	} else {
2751 		ring_data[q->tqp_index + queue_num].ring = ring;
2752 		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2753 		ring->io_base = q->io_base;
2754 	}
2755 
2756 	hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2757 
2758 	ring->tqp = q;
2759 	ring->desc = NULL;
2760 	ring->desc_cb = NULL;
2761 	ring->dev = priv->dev;
2762 	ring->desc_dma_addr = 0;
2763 	ring->buf_size = q->buf_size;
2764 	ring->desc_num = q->desc_num;
2765 	ring->next_to_use = 0;
2766 	ring->next_to_clean = 0;
2767 
2768 	return 0;
2769 }
2770 
2771 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2772 			      struct hns3_nic_priv *priv)
2773 {
2774 	int ret;
2775 
2776 	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2777 	if (ret)
2778 		return ret;
2779 
2780 	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2781 	if (ret)
2782 		return ret;
2783 
2784 	return 0;
2785 }
2786 
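/* Allocate the ring_data array and create the software rings: entries
 * [0, num_tqps) hold the TX rings and entries [num_tqps, 2 * num_tqps) hold
 * the corresponding RX rings.
 */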
2787 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2788 {
2789 	struct hnae3_handle *h = priv->ae_handle;
2790 	struct pci_dev *pdev = h->pdev;
2791 	int i, ret;
2792 
	priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2794 					sizeof(*priv->ring_data) * 2,
2795 					GFP_KERNEL);
2796 	if (!priv->ring_data)
2797 		return -ENOMEM;
2798 
2799 	for (i = 0; i < h->kinfo.num_tqps; i++) {
2800 		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2801 		if (ret)
2802 			goto err;
2803 	}
2804 
2805 	return 0;
2806 err:
2807 	devm_kfree(&pdev->dev, priv->ring_data);
2808 	return ret;
2809 }
2810 
2811 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
2812 {
2813 	struct hnae3_handle *h = priv->ae_handle;
2814 	int i;
2815 
2816 	for (i = 0; i < h->kinfo.num_tqps; i++) {
2817 		devm_kfree(priv->dev, priv->ring_data[i].ring);
2818 		devm_kfree(priv->dev,
2819 			   priv->ring_data[i + h->kinfo.num_tqps].ring);
2820 	}
2821 	devm_kfree(priv->dev, priv->ring_data);
2822 }
2823 
2824 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2825 {
2826 	int ret;
2827 
2828 	if (ring->desc_num <= 0 || ring->buf_size <= 0)
2829 		return -EINVAL;
2830 
2831 	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2832 				GFP_KERNEL);
2833 	if (!ring->desc_cb) {
2834 		ret = -ENOMEM;
2835 		goto out;
2836 	}
2837 
2838 	ret = hns3_alloc_desc(ring);
2839 	if (ret)
2840 		goto out_with_desc_cb;
2841 
2842 	if (!HNAE3_IS_TX_RING(ring)) {
2843 		ret = hns3_alloc_ring_buffers(ring);
2844 		if (ret)
2845 			goto out_with_desc;
2846 	}
2847 
2848 	return 0;
2849 
2850 out_with_desc:
2851 	hns3_free_desc(ring);
2852 out_with_desc_cb:
2853 	kfree(ring->desc_cb);
2854 	ring->desc_cb = NULL;
2855 out:
2856 	return ret;
2857 }
2858 
2859 static void hns3_fini_ring(struct hns3_enet_ring *ring)
2860 {
2861 	hns3_free_desc(ring);
2862 	kfree(ring->desc_cb);
2863 	ring->desc_cb = NULL;
2864 	ring->next_to_clean = 0;
2865 	ring->next_to_use = 0;
2866 }
2867 
2868 static int hns3_buf_size2type(u32 buf_size)
2869 {
2870 	int bd_size_type;
2871 
2872 	switch (buf_size) {
2873 	case 512:
2874 		bd_size_type = HNS3_BD_SIZE_512_TYPE;
2875 		break;
2876 	case 1024:
2877 		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2878 		break;
2879 	case 2048:
2880 		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2881 		break;
2882 	case 4096:
2883 		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2884 		break;
2885 	default:
2886 		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2887 	}
2888 
2889 	return bd_size_type;
2890 }
2891 
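/* Program the ring's DMA base address, buffer size type and descriptor
 * count into the queue's RX or TX ring registers.
 */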
2892 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2893 {
2894 	dma_addr_t dma = ring->desc_dma_addr;
2895 	struct hnae3_queue *q = ring->tqp;
2896 
2897 	if (!HNAE3_IS_TX_RING(ring)) {
2898 		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2899 			       (u32)dma);
2900 		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2901 			       (u32)((dma >> 31) >> 1));
2902 
2903 		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2904 			       hns3_buf_size2type(ring->buf_size));
2905 		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2906 			       ring->desc_num / 8 - 1);
2907 
2908 	} else {
2909 		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2910 			       (u32)dma);
2911 		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2912 			       (u32)((dma >> 31) >> 1));
2913 
2914 		hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2915 			       hns3_buf_size2type(ring->buf_size));
2916 		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2917 			       ring->desc_num / 8 - 1);
2918 	}
2919 }
2920 
2921 int hns3_init_all_ring(struct hns3_nic_priv *priv)
2922 {
2923 	struct hnae3_handle *h = priv->ae_handle;
2924 	int ring_num = h->kinfo.num_tqps * 2;
2925 	int i, j;
2926 	int ret;
2927 
2928 	for (i = 0; i < ring_num; i++) {
2929 		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2930 		if (ret) {
2931 			dev_err(priv->dev,
2932 				"Alloc ring memory fail! ret=%d\n", ret);
2933 			goto out_when_alloc_ring_memory;
2934 		}
2935 
2936 		hns3_init_ring_hw(priv->ring_data[i].ring);
2937 
2938 		u64_stats_init(&priv->ring_data[i].ring->syncp);
2939 	}
2940 
2941 	return 0;
2942 
2943 out_when_alloc_ring_memory:
2944 	for (j = i - 1; j >= 0; j--)
2945 		hns3_fini_ring(priv->ring_data[j].ring);
2946 
2947 	return -ENOMEM;
2948 }
2949 
2950 int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2951 {
2952 	struct hnae3_handle *h = priv->ae_handle;
2953 	int i;
2954 
2955 	for (i = 0; i < h->kinfo.num_tqps; i++) {
2956 		if (h->ae_algo->ops->reset_queue)
2957 			h->ae_algo->ops->reset_queue(h, i);
2958 
2959 		hns3_fini_ring(priv->ring_data[i].ring);
2960 		devm_kfree(priv->dev, priv->ring_data[i].ring);
2961 		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2962 		devm_kfree(priv->dev,
2963 			   priv->ring_data[i + h->kinfo.num_tqps].ring);
2964 	}
2965 	devm_kfree(priv->dev, priv->ring_data);
2966 
2967 	return 0;
2968 }
2969 
/* Set the MAC address if it is configured, otherwise leave it to the AE driver */
2971 static void hns3_init_mac_addr(struct net_device *netdev)
2972 {
2973 	struct hns3_nic_priv *priv = netdev_priv(netdev);
2974 	struct hnae3_handle *h = priv->ae_handle;
2975 	u8 mac_addr_temp[ETH_ALEN];
2976 
2977 	if (h->ae_algo->ops->get_mac_addr) {
2978 		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2979 		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2980 	}
2981 
2982 	/* Check if the MAC address is valid, if not get a random one */
2983 	if (!is_valid_ether_addr(netdev->dev_addr)) {
2984 		eth_hw_addr_random(netdev);
2985 		dev_warn(priv->dev, "using random MAC address %pM\n",
2986 			 netdev->dev_addr);
2987 	}
2988 
2989 	if (h->ae_algo->ops->set_mac_addr)
		h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
}
2993 
2994 static void hns3_nic_set_priv_ops(struct net_device *netdev)
2995 {
2996 	struct hns3_nic_priv *priv = netdev_priv(netdev);
2997 
2998 	if ((netdev->features & NETIF_F_TSO) ||
2999 	    (netdev->features & NETIF_F_TSO6)) {
3000 		priv->ops.fill_desc = hns3_fill_desc_tso;
3001 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
3002 	} else {
3003 		priv->ops.fill_desc = hns3_fill_desc;
3004 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
3005 	}
3006 }
3007 
3008 static int hns3_client_init(struct hnae3_handle *handle)
3009 {
3010 	struct pci_dev *pdev = handle->pdev;
3011 	struct hns3_nic_priv *priv;
3012 	struct net_device *netdev;
3013 	int ret;
3014 
3015 	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
3016 				   handle->kinfo.num_tqps);
3017 	if (!netdev)
3018 		return -ENOMEM;
3019 
3020 	priv = netdev_priv(netdev);
3021 	priv->dev = &pdev->dev;
3022 	priv->netdev = netdev;
3023 	priv->ae_handle = handle;
3024 	priv->last_reset_time = jiffies;
3025 	priv->reset_level = HNAE3_FUNC_RESET;
3026 	priv->tx_timeout_count = 0;
3027 
3028 	handle->kinfo.netdev = netdev;
3029 	handle->priv = (void *)priv;
3030 
3031 	hns3_init_mac_addr(netdev);
3032 
3033 	hns3_set_default_feature(netdev);
3034 
3035 	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3036 	netdev->priv_flags |= IFF_UNICAST_FLT;
3037 	netdev->netdev_ops = &hns3_nic_netdev_ops;
3038 	SET_NETDEV_DEV(netdev, &pdev->dev);
3039 	hns3_ethtool_set_ops(netdev);
3040 	hns3_nic_set_priv_ops(netdev);
3041 
3042 	/* Carrier off reporting is important to ethtool even BEFORE open */
3043 	netif_carrier_off(netdev);
3044 
3045 	ret = hns3_get_ring_config(priv);
3046 	if (ret) {
3047 		ret = -ENOMEM;
3048 		goto out_get_ring_cfg;
3049 	}
3050 
3051 	ret = hns3_nic_init_vector_data(priv);
3052 	if (ret) {
3053 		ret = -ENOMEM;
3054 		goto out_init_vector_data;
3055 	}
3056 
3057 	ret = hns3_init_all_ring(priv);
3058 	if (ret) {
3059 		ret = -ENOMEM;
3060 		goto out_init_ring_data;
3061 	}
3062 
3063 	ret = register_netdev(netdev);
3064 	if (ret) {
3065 		dev_err(priv->dev, "probe register netdev fail!\n");
3066 		goto out_reg_netdev_fail;
3067 	}
3068 
3069 	hns3_dcbnl_setup(handle);
3070 
3071 	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
3072 	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3073 
3074 	return ret;
3075 
3076 out_reg_netdev_fail:
3077 out_init_ring_data:
3078 	(void)hns3_nic_uninit_vector_data(priv);
3079 	priv->ring_data = NULL;
3080 out_init_vector_data:
3081 out_get_ring_cfg:
3082 	priv->ae_handle = NULL;
3083 	free_netdev(netdev);
3084 	return ret;
3085 }
3086 
3087 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3088 {
3089 	struct net_device *netdev = handle->kinfo.netdev;
3090 	struct hns3_nic_priv *priv = netdev_priv(netdev);
3091 	int ret;
3092 
3093 	if (netdev->reg_state != NETREG_UNINITIALIZED)
3094 		unregister_netdev(netdev);
3095 
3096 	ret = hns3_nic_uninit_vector_data(priv);
3097 	if (ret)
3098 		netdev_err(netdev, "uninit vector error\n");
3099 
3100 	ret = hns3_uninit_all_ring(priv);
3101 	if (ret)
3102 		netdev_err(netdev, "uninit ring error\n");
3103 
3104 	priv->ring_data = NULL;
3105 
3106 	free_netdev(netdev);
3107 }
3108 
3109 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3110 {
3111 	struct net_device *netdev = handle->kinfo.netdev;
3112 
3113 	if (!netdev)
3114 		return;
3115 
3116 	if (linkup) {
3117 		netif_carrier_on(netdev);
3118 		netif_tx_wake_all_queues(netdev);
3119 		netdev_info(netdev, "link up\n");
3120 	} else {
3121 		netif_carrier_off(netdev);
3122 		netif_tx_stop_all_queues(netdev);
3123 		netdev_info(netdev, "link down\n");
3124 	}
3125 }
3126 
3127 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3128 {
3129 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3130 	struct net_device *ndev = kinfo->netdev;
3131 	bool if_running;
3132 	int ret;
3133 	u8 i;
3134 
3135 	if (tc > HNAE3_MAX_TC)
3136 		return -EINVAL;
3137 
3138 	if (!ndev)
3139 		return -ENODEV;
3140 
3141 	if_running = netif_running(ndev);
3142 
3143 	ret = netdev_set_num_tc(ndev, tc);
3144 	if (ret)
3145 		return ret;
3146 
3147 	if (if_running) {
3148 		(void)hns3_nic_net_stop(ndev);
3149 		msleep(100);
3150 	}
3151 
3152 	ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3153 		kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3154 	if (ret)
3155 		goto err_out;
3156 
3157 	if (tc <= 1) {
3158 		netdev_reset_tc(ndev);
3159 		goto out;
3160 	}
3161 
3162 	for (i = 0; i < HNAE3_MAX_TC; i++) {
3163 		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3164 
3165 		if (tc_info->enable)
3166 			netdev_set_tc_queue(ndev,
3167 					    tc_info->tc,
3168 					    tc_info->tqp_count,
3169 					    tc_info->tqp_offset);
3170 	}
3171 
3172 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
3173 		netdev_set_prio_tc_map(ndev, i,
3174 				       kinfo->prio_tc[i]);
3175 	}
3176 
3177 out:
3178 	ret = hns3_nic_set_real_num_queue(ndev);
3179 
3180 err_out:
3181 	if (if_running)
3182 		(void)hns3_nic_net_open(ndev);
3183 
3184 	return ret;
3185 }
3186 
3187 static void hns3_recover_hw_addr(struct net_device *ndev)
3188 {
3189 	struct netdev_hw_addr_list *list;
3190 	struct netdev_hw_addr *ha, *tmp;
3191 
3192 	/* go through and sync uc_addr entries to the device */
3193 	list = &ndev->uc;
3194 	list_for_each_entry_safe(ha, tmp, &list->list, list)
3195 		hns3_nic_uc_sync(ndev, ha->addr);
3196 
3197 	/* go through and sync mc_addr entries to the device */
3198 	list = &ndev->mc;
3199 	list_for_each_entry_safe(ha, tmp, &list->list, list)
3200 		hns3_nic_mc_sync(ndev, ha->addr);
3201 }
3202 
3203 static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
3204 {
3205 	dev_kfree_skb_any(skb);
3206 }
3207 
3208 static void hns3_clear_all_ring(struct hnae3_handle *h)
3209 {
3210 	struct net_device *ndev = h->kinfo.netdev;
3211 	struct hns3_nic_priv *priv = netdev_priv(ndev);
3212 	u32 i;
3213 
3214 	for (i = 0; i < h->kinfo.num_tqps; i++) {
3215 		struct netdev_queue *dev_queue;
3216 		struct hns3_enet_ring *ring;
3217 
3218 		ring = priv->ring_data[i].ring;
3219 		hns3_clean_tx_ring(ring, ring->desc_num);
3220 		dev_queue = netdev_get_tx_queue(ndev,
3221 						priv->ring_data[i].queue_index);
3222 		netdev_tx_reset_queue(dev_queue);
3223 
3224 		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3225 		hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
3226 	}
3227 }
3228 
3229 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3230 {
3231 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3232 	struct net_device *ndev = kinfo->netdev;
3233 
3234 	if (!netif_running(ndev))
3235 		return -EIO;
3236 
3237 	return hns3_nic_net_stop(ndev);
3238 }
3239 
3240 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3241 {
3242 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3243 	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
3244 	int ret = 0;
3245 
3246 	if (netif_running(kinfo->netdev)) {
3247 		ret = hns3_nic_net_up(kinfo->netdev);
3248 		if (ret) {
3249 			netdev_err(kinfo->netdev,
3250 				   "hns net up fail, ret=%d!\n", ret);
3251 			return ret;
3252 		}
3253 
3254 		priv->last_reset_time = jiffies;
3255 	}
3256 
3257 	return ret;
3258 }
3259 
3260 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3261 {
3262 	struct net_device *netdev = handle->kinfo.netdev;
3263 	struct hns3_nic_priv *priv = netdev_priv(netdev);
3264 	int ret;
3265 
3266 	priv->reset_level = 1;
3267 	hns3_init_mac_addr(netdev);
3268 	hns3_nic_set_rx_mode(netdev);
3269 	hns3_recover_hw_addr(netdev);
3270 
3271 	/* Carrier off reporting is important to ethtool even BEFORE open */
3272 	netif_carrier_off(netdev);
3273 
3274 	ret = hns3_get_ring_config(priv);
3275 	if (ret)
3276 		return ret;
3277 
3278 	ret = hns3_nic_init_vector_data(priv);
3279 	if (ret)
3280 		return ret;
3281 
3282 	ret = hns3_init_all_ring(priv);
3283 	if (ret) {
3284 		hns3_nic_uninit_vector_data(priv);
3285 		priv->ring_data = NULL;
3286 	}
3287 
3288 	return ret;
3289 }
3290 
3291 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3292 {
3293 	struct net_device *netdev = handle->kinfo.netdev;
3294 	struct hns3_nic_priv *priv = netdev_priv(netdev);
3295 	int ret;
3296 
3297 	hns3_clear_all_ring(handle);
3298 
3299 	ret = hns3_nic_uninit_vector_data(priv);
3300 	if (ret) {
3301 		netdev_err(netdev, "uninit vector error\n");
3302 		return ret;
3303 	}
3304 
3305 	ret = hns3_uninit_all_ring(priv);
3306 	if (ret)
3307 		netdev_err(netdev, "uninit ring error\n");
3308 
3309 	priv->ring_data = NULL;
3310 
3311 	return ret;
3312 }
3313 
3314 static int hns3_reset_notify(struct hnae3_handle *handle,
3315 			     enum hnae3_reset_notify_type type)
3316 {
3317 	int ret = 0;
3318 
3319 	switch (type) {
3320 	case HNAE3_UP_CLIENT:
3321 		ret = hns3_reset_notify_up_enet(handle);
3322 		break;
3323 	case HNAE3_DOWN_CLIENT:
3324 		ret = hns3_reset_notify_down_enet(handle);
3325 		break;
3326 	case HNAE3_INIT_CLIENT:
3327 		ret = hns3_reset_notify_init_enet(handle);
3328 		break;
3329 	case HNAE3_UNINIT_CLIENT:
3330 		ret = hns3_reset_notify_uninit_enet(handle);
3331 		break;
3332 	default:
3333 		break;
3334 	}
3335 
3336 	return ret;
3337 }
3338 
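/* Upper bound on the number of combined channels the user may configure,
 * limited both by the TQPs still available in the AE layer and by
 * num_tc * max_rss_size.
 */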
3339 static u16 hns3_get_max_available_channels(struct net_device *netdev)
3340 {
3341 	struct hnae3_handle *h = hns3_get_handle(netdev);
3342 	u16 free_tqps, max_rss_size, max_tqps;
3343 
3344 	h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
3345 	max_tqps = h->kinfo.num_tc * max_rss_size;
3346 
3347 	return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
3348 }
3349 
3350 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
3351 {
3352 	struct hns3_nic_priv *priv = netdev_priv(netdev);
3353 	struct hnae3_handle *h = hns3_get_handle(netdev);
3354 	int ret;
3355 
3356 	ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3357 	if (ret)
3358 		return ret;
3359 
3360 	ret = hns3_get_ring_config(priv);
3361 	if (ret)
3362 		return ret;
3363 
3364 	ret = hns3_nic_init_vector_data(priv);
3365 	if (ret)
3366 		goto err_uninit_vector;
3367 
3368 	ret = hns3_init_all_ring(priv);
3369 	if (ret)
3370 		goto err_put_ring;
3371 
3372 	return 0;
3373 
3374 err_put_ring:
3375 	hns3_put_ring_config(priv);
3376 err_uninit_vector:
3377 	hns3_nic_uninit_vector_data(priv);
3378 	return ret;
3379 }
3380 
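/* Round the requested TQP count down to a multiple of the TC count so that
 * every TC is backed by the same number of queues.
 */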
3381 static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3382 {
3383 	return (new_tqp_num / num_tc) * num_tc;
3384 }
3385 
3386 int hns3_set_channels(struct net_device *netdev,
3387 		      struct ethtool_channels *ch)
3388 {
3389 	struct hns3_nic_priv *priv = netdev_priv(netdev);
3390 	struct hnae3_handle *h = hns3_get_handle(netdev);
3391 	struct hnae3_knic_private_info *kinfo = &h->kinfo;
3392 	bool if_running = netif_running(netdev);
3393 	u32 new_tqp_num = ch->combined_count;
3394 	u16 org_tqp_num;
3395 	int ret;
3396 
3397 	if (ch->rx_count || ch->tx_count)
3398 		return -EINVAL;
3399 
3400 	if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
3401 	    new_tqp_num < kinfo->num_tc) {
3402 		dev_err(&netdev->dev,
3403 			"Change tqps fail, the tqp range is from %d to %d",
3404 			kinfo->num_tc,
3405 			hns3_get_max_available_channels(netdev));
3406 		return -EINVAL;
3407 	}
3408 
3409 	new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3410 	if (kinfo->num_tqps == new_tqp_num)
3411 		return 0;
3412 
3413 	if (if_running)
3414 		dev_close(netdev);
3415 
3416 	hns3_clear_all_ring(h);
3417 
3418 	ret = hns3_nic_uninit_vector_data(priv);
3419 	if (ret) {
3420 		dev_err(&netdev->dev,
3421 			"Unbind vector with tqp fail, nothing is changed");
3422 		goto open_netdev;
3423 	}
3424 
3425 	hns3_uninit_all_ring(priv);
3426 
3427 	org_tqp_num = h->kinfo.num_tqps;
3428 	ret = hns3_modify_tqp_num(netdev, new_tqp_num);
3429 	if (ret) {
3430 		ret = hns3_modify_tqp_num(netdev, org_tqp_num);
3431 		if (ret) {
3432 			/* If revert to old tqp failed, fatal error occurred */
3433 			dev_err(&netdev->dev,
3434 				"Revert to old tqp num fail, ret=%d", ret);
3435 			return ret;
3436 		}
3437 		dev_info(&netdev->dev,
3438 			 "Change tqp num fail, Revert to old tqp num");
3439 	}
3440 
3441 open_netdev:
3442 	if (if_running)
3443 		dev_open(netdev);
3444 
3445 	return ret;
3446 }
3447 
3448 static const struct hnae3_client_ops client_ops = {
3449 	.init_instance = hns3_client_init,
3450 	.uninit_instance = hns3_client_uninit,
3451 	.link_status_change = hns3_link_status_change,
3452 	.setup_tc = hns3_client_setup_tc,
3453 	.reset_notify = hns3_reset_notify,
3454 };
3455 
/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register the client with the HNAE3 framework
 * and the driver with the PCI subsystem.
 */
3460 static int __init hns3_init_module(void)
3461 {
3462 	int ret;
3463 
3464 	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
3465 	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3466 
3467 	client.type = HNAE3_CLIENT_KNIC;
3468 	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3469 		 hns3_driver_name);
3470 
3471 	client.ops = &client_ops;
3472 
3473 	ret = hnae3_register_client(&client);
3474 	if (ret)
3475 		return ret;
3476 
3477 	ret = pci_register_driver(&hns3_driver);
3478 	if (ret)
3479 		hnae3_unregister_client(&client);
3480 
3481 	return ret;
3482 }
3483 module_init(hns3_init_module);
3484 
3485 /* hns3_exit_module - Driver exit cleanup routine
3486  * hns3_exit_module is called just before the driver is removed
3487  * from memory.
3488  */
3489 static void __exit hns3_exit_module(void)
3490 {
3491 	pci_unregister_driver(&hns3_driver);
3492 	hnae3_unregister_client(&client);
3493 }
3494 module_exit(hns3_exit_module);
3495 
3496 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3497 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3498 MODULE_LICENSE("GPL");
3499 MODULE_ALIAS("pci:hns-nic");
3500