/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

static irqreturn_t hns3_irq_handle(int irq, void *dev)
{
	struct hns3_enet_tqp_vector *tqp_vector = dev;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
			txrx_int_idx++;
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
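			/* Tx-only vector: name the interrupt after the Tx queue */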
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name,
				  tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second. GL and RL (Rate Limiter) are two ways to achieve
	 * interrupt coalescing.
	 */

	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
				   struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2.
RL (Interrupt Rate Limiter) 205 */ 206 207 /* Default: enable interrupt coalescing self-adaptive and GL */ 208 tqp_vector->tx_group.coal.gl_adapt_enable = 1; 209 tqp_vector->rx_group.coal.gl_adapt_enable = 1; 210 211 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; 212 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; 213 214 /* Default: disable RL */ 215 h->kinfo.int_rl_setting = 0; 216 217 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; 218 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; 219 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; 220 } 221 222 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector, 223 struct hns3_nic_priv *priv) 224 { 225 struct hnae3_handle *h = priv->ae_handle; 226 227 hns3_set_vector_coalesce_tx_gl(tqp_vector, 228 tqp_vector->tx_group.coal.int_gl); 229 hns3_set_vector_coalesce_rx_gl(tqp_vector, 230 tqp_vector->rx_group.coal.int_gl); 231 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); 232 } 233 234 static int hns3_nic_set_real_num_queue(struct net_device *netdev) 235 { 236 struct hnae3_handle *h = hns3_get_handle(netdev); 237 struct hnae3_knic_private_info *kinfo = &h->kinfo; 238 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; 239 int ret; 240 241 ret = netif_set_real_num_tx_queues(netdev, queue_size); 242 if (ret) { 243 netdev_err(netdev, 244 "netif_set_real_num_tx_queues fail, ret=%d!\n", 245 ret); 246 return ret; 247 } 248 249 ret = netif_set_real_num_rx_queues(netdev, queue_size); 250 if (ret) { 251 netdev_err(netdev, 252 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); 253 return ret; 254 } 255 256 return 0; 257 } 258 259 static u16 hns3_get_max_available_channels(struct hnae3_handle *h) 260 { 261 u16 free_tqps, max_rss_size, max_tqps; 262 263 h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); 264 max_tqps = h->kinfo.num_tc * max_rss_size; 265 266 return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); 267 } 268 269 static int hns3_nic_net_up(struct net_device *netdev) 270 { 271 struct hns3_nic_priv *priv = netdev_priv(netdev); 272 struct hnae3_handle *h = priv->ae_handle; 273 int i, j; 274 int ret; 275 276 /* get irq resource for all vectors */ 277 ret = hns3_nic_init_irq(priv); 278 if (ret) { 279 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret); 280 return ret; 281 } 282 283 /* enable the vectors */ 284 for (i = 0; i < priv->vector_num; i++) 285 hns3_vector_enable(&priv->tqp_vector[i]); 286 287 /* start the ae_dev */ 288 ret = h->ae_algo->ops->start ? 
h->ae_algo->ops->start(h) : 0; 289 if (ret) 290 goto out_start_err; 291 292 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 293 294 return 0; 295 296 out_start_err: 297 for (j = i - 1; j >= 0; j--) 298 hns3_vector_disable(&priv->tqp_vector[j]); 299 300 hns3_nic_uninit_irq(priv); 301 302 return ret; 303 } 304 305 static int hns3_nic_net_open(struct net_device *netdev) 306 { 307 struct hns3_nic_priv *priv = netdev_priv(netdev); 308 int ret; 309 310 netif_carrier_off(netdev); 311 312 ret = hns3_nic_set_real_num_queue(netdev); 313 if (ret) 314 return ret; 315 316 ret = hns3_nic_net_up(netdev); 317 if (ret) { 318 netdev_err(netdev, 319 "hns net up fail, ret=%d!\n", ret); 320 return ret; 321 } 322 323 priv->ae_handle->last_reset_time = jiffies; 324 return 0; 325 } 326 327 static void hns3_nic_net_down(struct net_device *netdev) 328 { 329 struct hns3_nic_priv *priv = netdev_priv(netdev); 330 const struct hnae3_ae_ops *ops; 331 int i; 332 333 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 334 return; 335 336 /* stop ae_dev */ 337 ops = priv->ae_handle->ae_algo->ops; 338 if (ops->stop) 339 ops->stop(priv->ae_handle); 340 341 /* disable vectors */ 342 for (i = 0; i < priv->vector_num; i++) 343 hns3_vector_disable(&priv->tqp_vector[i]); 344 345 /* free irq resources */ 346 hns3_nic_uninit_irq(priv); 347 } 348 349 static int hns3_nic_net_stop(struct net_device *netdev) 350 { 351 netif_tx_stop_all_queues(netdev); 352 netif_carrier_off(netdev); 353 354 hns3_nic_net_down(netdev); 355 356 return 0; 357 } 358 359 static int hns3_nic_uc_sync(struct net_device *netdev, 360 const unsigned char *addr) 361 { 362 struct hnae3_handle *h = hns3_get_handle(netdev); 363 364 if (h->ae_algo->ops->add_uc_addr) 365 return h->ae_algo->ops->add_uc_addr(h, addr); 366 367 return 0; 368 } 369 370 static int hns3_nic_uc_unsync(struct net_device *netdev, 371 const unsigned char *addr) 372 { 373 struct hnae3_handle *h = hns3_get_handle(netdev); 374 375 if (h->ae_algo->ops->rm_uc_addr) 376 return h->ae_algo->ops->rm_uc_addr(h, addr); 377 378 return 0; 379 } 380 381 static int hns3_nic_mc_sync(struct net_device *netdev, 382 const unsigned char *addr) 383 { 384 struct hnae3_handle *h = hns3_get_handle(netdev); 385 386 if (h->ae_algo->ops->add_mc_addr) 387 return h->ae_algo->ops->add_mc_addr(h, addr); 388 389 return 0; 390 } 391 392 static int hns3_nic_mc_unsync(struct net_device *netdev, 393 const unsigned char *addr) 394 { 395 struct hnae3_handle *h = hns3_get_handle(netdev); 396 397 if (h->ae_algo->ops->rm_mc_addr) 398 return h->ae_algo->ops->rm_mc_addr(h, addr); 399 400 return 0; 401 } 402 403 static void hns3_nic_set_rx_mode(struct net_device *netdev) 404 { 405 struct hnae3_handle *h = hns3_get_handle(netdev); 406 407 if (h->ae_algo->ops->set_promisc_mode) { 408 if (netdev->flags & IFF_PROMISC) 409 h->ae_algo->ops->set_promisc_mode(h, 1); 410 else 411 h->ae_algo->ops->set_promisc_mode(h, 0); 412 } 413 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) 414 netdev_err(netdev, "sync uc address fail\n"); 415 if (netdev->flags & IFF_MULTICAST) 416 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) 417 netdev_err(netdev, "sync mc address fail\n"); 418 } 419 420 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, 421 u16 *mss, u32 *type_cs_vlan_tso) 422 { 423 u32 l4_offset, hdr_len; 424 union l3_hdr_info l3; 425 union l4_hdr_info l4; 426 u32 l4_paylen; 427 int ret; 428 429 if (!skb_is_gso(skb)) 430 return 0; 431 432 ret = skb_cow_head(skb, 0); 433 if (ret) 434 return ret; 435 436 l3.hdr = 
skb_network_header(skb); 437 l4.hdr = skb_transport_header(skb); 438 439 /* Software should clear the IPv4's checksum field when tso is 440 * needed. 441 */ 442 if (l3.v4->version == 4) 443 l3.v4->check = 0; 444 445 /* tunnel packet.*/ 446 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 447 SKB_GSO_GRE_CSUM | 448 SKB_GSO_UDP_TUNNEL | 449 SKB_GSO_UDP_TUNNEL_CSUM)) { 450 if ((!(skb_shinfo(skb)->gso_type & 451 SKB_GSO_PARTIAL)) && 452 (skb_shinfo(skb)->gso_type & 453 SKB_GSO_UDP_TUNNEL_CSUM)) { 454 /* Software should clear the udp's checksum 455 * field when tso is needed. 456 */ 457 l4.udp->check = 0; 458 } 459 /* reset l3&l4 pointers from outer to inner headers */ 460 l3.hdr = skb_inner_network_header(skb); 461 l4.hdr = skb_inner_transport_header(skb); 462 463 /* Software should clear the IPv4's checksum field when 464 * tso is needed. 465 */ 466 if (l3.v4->version == 4) 467 l3.v4->check = 0; 468 } 469 470 /* normal or tunnel packet*/ 471 l4_offset = l4.hdr - skb->data; 472 hdr_len = (l4.tcp->doff * 4) + l4_offset; 473 474 /* remove payload length from inner pseudo checksum when tso*/ 475 l4_paylen = skb->len - l4_offset; 476 csum_replace_by_diff(&l4.tcp->check, 477 (__force __wsum)htonl(l4_paylen)); 478 479 /* find the txbd field values */ 480 *paylen = skb->len - hdr_len; 481 hnae_set_bit(*type_cs_vlan_tso, 482 HNS3_TXD_TSO_B, 1); 483 484 /* get MSS for TSO */ 485 *mss = skb_shinfo(skb)->gso_size; 486 487 return 0; 488 } 489 490 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, 491 u8 *il4_proto) 492 { 493 union { 494 struct iphdr *v4; 495 struct ipv6hdr *v6; 496 unsigned char *hdr; 497 } l3; 498 unsigned char *l4_hdr; 499 unsigned char *exthdr; 500 u8 l4_proto_tmp; 501 __be16 frag_off; 502 503 /* find outer header point */ 504 l3.hdr = skb_network_header(skb); 505 l4_hdr = skb_transport_header(skb); 506 507 if (skb->protocol == htons(ETH_P_IPV6)) { 508 exthdr = l3.hdr + sizeof(*l3.v6); 509 l4_proto_tmp = l3.v6->nexthdr; 510 if (l4_hdr != exthdr) 511 ipv6_skip_exthdr(skb, exthdr - skb->data, 512 &l4_proto_tmp, &frag_off); 513 } else if (skb->protocol == htons(ETH_P_IP)) { 514 l4_proto_tmp = l3.v4->protocol; 515 } else { 516 return -EINVAL; 517 } 518 519 *ol4_proto = l4_proto_tmp; 520 521 /* tunnel packet */ 522 if (!skb->encapsulation) { 523 *il4_proto = 0; 524 return 0; 525 } 526 527 /* find inner header point */ 528 l3.hdr = skb_inner_network_header(skb); 529 l4_hdr = skb_inner_transport_header(skb); 530 531 if (l3.v6->version == 6) { 532 exthdr = l3.hdr + sizeof(*l3.v6); 533 l4_proto_tmp = l3.v6->nexthdr; 534 if (l4_hdr != exthdr) 535 ipv6_skip_exthdr(skb, exthdr - skb->data, 536 &l4_proto_tmp, &frag_off); 537 } else if (l3.v4->version == 4) { 538 l4_proto_tmp = l3.v4->protocol; 539 } 540 541 *il4_proto = l4_proto_tmp; 542 543 return 0; 544 } 545 546 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, 547 u8 il4_proto, u32 *type_cs_vlan_tso, 548 u32 *ol_type_vlan_len_msec) 549 { 550 union { 551 struct iphdr *v4; 552 struct ipv6hdr *v6; 553 unsigned char *hdr; 554 } l3; 555 union { 556 struct tcphdr *tcp; 557 struct udphdr *udp; 558 struct gre_base_hdr *gre; 559 unsigned char *hdr; 560 } l4; 561 unsigned char *l2_hdr; 562 u8 l4_proto = ol4_proto; 563 u32 ol2_len; 564 u32 ol3_len; 565 u32 ol4_len; 566 u32 l2_len; 567 u32 l3_len; 568 569 l3.hdr = skb_network_header(skb); 570 l4.hdr = skb_transport_header(skb); 571 572 /* compute L2 header size for normal packet, defined in 2 Bytes */ 573 l2_len = l3.hdr - skb->data; 574 hnae_set_field(*type_cs_vlan_tso, 
HNS3_TXD_L2LEN_M,
		       HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* tunnel packet */
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hnae_set_field(*ol_type_vlan_len_msec,
			       HNS3_TXD_L2LEN_M,
			       HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
			       HNS3_TXD_L3LEN_S, ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558) */
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch MAC header ptr from outer to inner header */
			l2_hdr = skb_inner_mac_header(skb);

			/* compute OL4 header size, defined in 4 Bytes */
			ol4_len = l2_hdr - l4.hdr;
			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
				       HNS3_TXD_L4LEN_S, ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes */
			l2_len = l3.hdr - l2_hdr;
			hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
				       HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware,
			 * txbd len field is not filled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
		       HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, l4.tcp->doff);
		break;
	case IPPROTO_SCTP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
		break;
	case IPPROTO_UDP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
		break;
	default:
		/* skb packet types not supported by hardware,
		 * txbd len field is not filled.
642 */ 643 return; 644 } 645 } 646 647 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, 648 u8 il4_proto, u32 *type_cs_vlan_tso, 649 u32 *ol_type_vlan_len_msec) 650 { 651 union { 652 struct iphdr *v4; 653 struct ipv6hdr *v6; 654 unsigned char *hdr; 655 } l3; 656 u32 l4_proto = ol4_proto; 657 658 l3.hdr = skb_network_header(skb); 659 660 /* define OL3 type and tunnel type(OL4).*/ 661 if (skb->encapsulation) { 662 /* define outer network header type.*/ 663 if (skb->protocol == htons(ETH_P_IP)) { 664 if (skb_is_gso(skb)) 665 hnae_set_field(*ol_type_vlan_len_msec, 666 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, 667 HNS3_OL3T_IPV4_CSUM); 668 else 669 hnae_set_field(*ol_type_vlan_len_msec, 670 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, 671 HNS3_OL3T_IPV4_NO_CSUM); 672 673 } else if (skb->protocol == htons(ETH_P_IPV6)) { 674 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, 675 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); 676 } 677 678 /* define tunnel type(OL4).*/ 679 switch (l4_proto) { 680 case IPPROTO_UDP: 681 hnae_set_field(*ol_type_vlan_len_msec, 682 HNS3_TXD_TUNTYPE_M, 683 HNS3_TXD_TUNTYPE_S, 684 HNS3_TUN_MAC_IN_UDP); 685 break; 686 case IPPROTO_GRE: 687 hnae_set_field(*ol_type_vlan_len_msec, 688 HNS3_TXD_TUNTYPE_M, 689 HNS3_TXD_TUNTYPE_S, 690 HNS3_TUN_NVGRE); 691 break; 692 default: 693 /* drop the skb tunnel packet if hardware don't support, 694 * because hardware can't calculate csum when TSO. 695 */ 696 if (skb_is_gso(skb)) 697 return -EDOM; 698 699 /* the stack computes the IP header already, 700 * driver calculate l4 checksum when not TSO. 701 */ 702 skb_checksum_help(skb); 703 return 0; 704 } 705 706 l3.hdr = skb_inner_network_header(skb); 707 l4_proto = il4_proto; 708 } 709 710 if (l3.v4->version == 4) { 711 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, 712 HNS3_TXD_L3T_S, HNS3_L3T_IPV4); 713 714 /* the stack computes the IP header already, the only time we 715 * need the hardware to recompute it is in the case of TSO. 716 */ 717 if (skb_is_gso(skb)) 718 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); 719 720 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 721 } else if (l3.v6->version == 6) { 722 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, 723 HNS3_TXD_L3T_S, HNS3_L3T_IPV6); 724 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 725 } 726 727 switch (l4_proto) { 728 case IPPROTO_TCP: 729 hnae_set_field(*type_cs_vlan_tso, 730 HNS3_TXD_L4T_M, 731 HNS3_TXD_L4T_S, 732 HNS3_L4T_TCP); 733 break; 734 case IPPROTO_UDP: 735 hnae_set_field(*type_cs_vlan_tso, 736 HNS3_TXD_L4T_M, 737 HNS3_TXD_L4T_S, 738 HNS3_L4T_UDP); 739 break; 740 case IPPROTO_SCTP: 741 hnae_set_field(*type_cs_vlan_tso, 742 HNS3_TXD_L4T_M, 743 HNS3_TXD_L4T_S, 744 HNS3_L4T_SCTP); 745 break; 746 default: 747 /* drop the skb tunnel packet if hardware don't support, 748 * because hardware can't calculate csum when TSO. 749 */ 750 if (skb_is_gso(skb)) 751 return -EDOM; 752 753 /* the stack computes the IP header already, 754 * driver calculate l4 checksum when not TSO. 
755 */ 756 skb_checksum_help(skb); 757 return 0; 758 } 759 760 return 0; 761 } 762 763 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) 764 { 765 /* Config bd buffer end */ 766 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, 767 HNS3_TXD_BDTYPE_S, 0); 768 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); 769 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); 770 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); 771 } 772 773 static int hns3_fill_desc_vtags(struct sk_buff *skb, 774 struct hns3_enet_ring *tx_ring, 775 u32 *inner_vlan_flag, 776 u32 *out_vlan_flag, 777 u16 *inner_vtag, 778 u16 *out_vtag) 779 { 780 #define HNS3_TX_VLAN_PRIO_SHIFT 13 781 782 if (skb->protocol == htons(ETH_P_8021Q) && 783 !(tx_ring->tqp->handle->kinfo.netdev->features & 784 NETIF_F_HW_VLAN_CTAG_TX)) { 785 /* When HW VLAN acceleration is turned off, and the stack 786 * sets the protocol to 802.1q, the driver just need to 787 * set the protocol to the encapsulated ethertype. 788 */ 789 skb->protocol = vlan_get_protocol(skb); 790 return 0; 791 } 792 793 if (skb_vlan_tag_present(skb)) { 794 u16 vlan_tag; 795 796 vlan_tag = skb_vlan_tag_get(skb); 797 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT; 798 799 /* Based on hw strategy, use out_vtag in two layer tag case, 800 * and use inner_vtag in one tag case. 801 */ 802 if (skb->protocol == htons(ETH_P_8021Q)) { 803 hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); 804 *out_vtag = vlan_tag; 805 } else { 806 hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); 807 *inner_vtag = vlan_tag; 808 } 809 } else if (skb->protocol == htons(ETH_P_8021Q)) { 810 struct vlan_ethhdr *vhdr; 811 int rc; 812 813 rc = skb_cow_head(skb, 0); 814 if (rc < 0) 815 return rc; 816 vhdr = (struct vlan_ethhdr *)skb->data; 817 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) 818 << HNS3_TX_VLAN_PRIO_SHIFT); 819 } 820 821 skb->protocol = vlan_get_protocol(skb); 822 return 0; 823 } 824 825 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, 826 int size, dma_addr_t dma, int frag_end, 827 enum hns_desc_type type) 828 { 829 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 830 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 831 u32 ol_type_vlan_len_msec = 0; 832 u16 bdtp_fe_sc_vld_ra_ri = 0; 833 u32 type_cs_vlan_tso = 0; 834 struct sk_buff *skb; 835 u16 inner_vtag = 0; 836 u16 out_vtag = 0; 837 u32 paylen = 0; 838 u16 mss = 0; 839 __be16 protocol; 840 u8 ol4_proto; 841 u8 il4_proto; 842 int ret; 843 844 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ 845 desc_cb->priv = priv; 846 desc_cb->length = size; 847 desc_cb->dma = dma; 848 desc_cb->type = type; 849 850 /* now, fill the descriptor */ 851 desc->addr = cpu_to_le64(dma); 852 desc->tx.send_size = cpu_to_le16((u16)size); 853 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); 854 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); 855 856 if (type == DESC_TYPE_SKB) { 857 skb = (struct sk_buff *)priv; 858 paylen = skb->len; 859 860 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, 861 &ol_type_vlan_len_msec, 862 &inner_vtag, &out_vtag); 863 if (unlikely(ret)) 864 return ret; 865 866 if (skb->ip_summed == CHECKSUM_PARTIAL) { 867 skb_reset_mac_len(skb); 868 protocol = skb->protocol; 869 870 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); 871 if (ret) 872 return ret; 873 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, 874 &type_cs_vlan_tso, 875 &ol_type_vlan_len_msec); 876 
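			/* fill the L3/L4 type and checksum-offload bits in the Tx BD */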
ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, 877 &type_cs_vlan_tso, 878 &ol_type_vlan_len_msec); 879 if (ret) 880 return ret; 881 882 ret = hns3_set_tso(skb, &paylen, &mss, 883 &type_cs_vlan_tso); 884 if (ret) 885 return ret; 886 } 887 888 /* Set txbd */ 889 desc->tx.ol_type_vlan_len_msec = 890 cpu_to_le32(ol_type_vlan_len_msec); 891 desc->tx.type_cs_vlan_tso_len = 892 cpu_to_le32(type_cs_vlan_tso); 893 desc->tx.paylen = cpu_to_le32(paylen); 894 desc->tx.mss = cpu_to_le16(mss); 895 desc->tx.vlan_tag = cpu_to_le16(inner_vtag); 896 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); 897 } 898 899 /* move ring pointer to next.*/ 900 ring_ptr_move_fw(ring, next_to_use); 901 902 return 0; 903 } 904 905 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, 906 int size, dma_addr_t dma, int frag_end, 907 enum hns_desc_type type) 908 { 909 unsigned int frag_buf_num; 910 unsigned int k; 911 int sizeoflast; 912 int ret; 913 914 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 915 sizeoflast = size % HNS3_MAX_BD_SIZE; 916 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 917 918 /* When the frag size is bigger than hardware, split this frag */ 919 for (k = 0; k < frag_buf_num; k++) { 920 ret = hns3_fill_desc(ring, priv, 921 (k == frag_buf_num - 1) ? 922 sizeoflast : HNS3_MAX_BD_SIZE, 923 dma + HNS3_MAX_BD_SIZE * k, 924 frag_end && (k == frag_buf_num - 1) ? 1 : 0, 925 (type == DESC_TYPE_SKB && !k) ? 926 DESC_TYPE_SKB : DESC_TYPE_PAGE); 927 if (ret) 928 return ret; 929 } 930 931 return 0; 932 } 933 934 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, 935 struct hns3_enet_ring *ring) 936 { 937 struct sk_buff *skb = *out_skb; 938 struct skb_frag_struct *frag; 939 int bdnum_for_frag; 940 int frag_num; 941 int buf_num; 942 int size; 943 int i; 944 945 size = skb_headlen(skb); 946 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 947 948 frag_num = skb_shinfo(skb)->nr_frags; 949 for (i = 0; i < frag_num; i++) { 950 frag = &skb_shinfo(skb)->frags[i]; 951 size = skb_frag_size(frag); 952 bdnum_for_frag = 953 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 954 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) 955 return -ENOMEM; 956 957 buf_num += bdnum_for_frag; 958 } 959 960 if (buf_num > ring_space(ring)) 961 return -EBUSY; 962 963 *bnum = buf_num; 964 return 0; 965 } 966 967 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, 968 struct hns3_enet_ring *ring) 969 { 970 struct sk_buff *skb = *out_skb; 971 int buf_num; 972 973 /* No. 
of segments (plus a header) */ 974 buf_num = skb_shinfo(skb)->nr_frags + 1; 975 976 if (buf_num > ring_space(ring)) 977 return -EBUSY; 978 979 *bnum = buf_num; 980 981 return 0; 982 } 983 984 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) 985 { 986 struct device *dev = ring_to_dev(ring); 987 unsigned int i; 988 989 for (i = 0; i < ring->desc_num; i++) { 990 /* check if this is where we started */ 991 if (ring->next_to_use == next_to_use_orig) 992 break; 993 994 /* unmap the descriptor dma address */ 995 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) 996 dma_unmap_single(dev, 997 ring->desc_cb[ring->next_to_use].dma, 998 ring->desc_cb[ring->next_to_use].length, 999 DMA_TO_DEVICE); 1000 else 1001 dma_unmap_page(dev, 1002 ring->desc_cb[ring->next_to_use].dma, 1003 ring->desc_cb[ring->next_to_use].length, 1004 DMA_TO_DEVICE); 1005 1006 /* rollback one */ 1007 ring_ptr_move_bw(ring, next_to_use); 1008 } 1009 } 1010 1011 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 1012 { 1013 struct hns3_nic_priv *priv = netdev_priv(netdev); 1014 struct hns3_nic_ring_data *ring_data = 1015 &tx_ring_data(priv, skb->queue_mapping); 1016 struct hns3_enet_ring *ring = ring_data->ring; 1017 struct device *dev = priv->dev; 1018 struct netdev_queue *dev_queue; 1019 struct skb_frag_struct *frag; 1020 int next_to_use_head; 1021 int next_to_use_frag; 1022 dma_addr_t dma; 1023 int buf_num; 1024 int seg_num; 1025 int size; 1026 int ret; 1027 int i; 1028 1029 /* Prefetch the data used later */ 1030 prefetch(skb->data); 1031 1032 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { 1033 case -EBUSY: 1034 u64_stats_update_begin(&ring->syncp); 1035 ring->stats.tx_busy++; 1036 u64_stats_update_end(&ring->syncp); 1037 1038 goto out_net_tx_busy; 1039 case -ENOMEM: 1040 u64_stats_update_begin(&ring->syncp); 1041 ring->stats.sw_err_cnt++; 1042 u64_stats_update_end(&ring->syncp); 1043 netdev_err(netdev, "no memory to xmit!\n"); 1044 1045 goto out_err_tx_ok; 1046 default: 1047 break; 1048 } 1049 1050 /* No. of segments (plus a header) */ 1051 seg_num = skb_shinfo(skb)->nr_frags + 1; 1052 /* Fill the first part */ 1053 size = skb_headlen(skb); 1054 1055 next_to_use_head = ring->next_to_use; 1056 1057 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); 1058 if (dma_mapping_error(dev, dma)) { 1059 netdev_err(netdev, "TX head DMA map failed\n"); 1060 ring->stats.sw_err_cnt++; 1061 goto out_err_tx_ok; 1062 } 1063 1064 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, 1065 DESC_TYPE_SKB); 1066 if (ret) 1067 goto head_dma_map_err; 1068 1069 next_to_use_frag = ring->next_to_use; 1070 /* Fill the fragments */ 1071 for (i = 1; i < seg_num; i++) { 1072 frag = &skb_shinfo(skb)->frags[i - 1]; 1073 size = skb_frag_size(frag); 1074 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); 1075 if (dma_mapping_error(dev, dma)) { 1076 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); 1077 ring->stats.sw_err_cnt++; 1078 goto frag_dma_map_err; 1079 } 1080 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, 1081 seg_num - 1 == i ? 
1 : 0, 1082 DESC_TYPE_PAGE); 1083 1084 if (ret) 1085 goto frag_dma_map_err; 1086 } 1087 1088 /* Complete translate all packets */ 1089 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); 1090 netdev_tx_sent_queue(dev_queue, skb->len); 1091 1092 wmb(); /* Commit all data before submit */ 1093 1094 hnae_queue_xmit(ring->tqp, buf_num); 1095 1096 return NETDEV_TX_OK; 1097 1098 frag_dma_map_err: 1099 hns_nic_dma_unmap(ring, next_to_use_frag); 1100 1101 head_dma_map_err: 1102 hns_nic_dma_unmap(ring, next_to_use_head); 1103 1104 out_err_tx_ok: 1105 dev_kfree_skb_any(skb); 1106 return NETDEV_TX_OK; 1107 1108 out_net_tx_busy: 1109 netif_stop_subqueue(netdev, ring_data->queue_index); 1110 smp_mb(); /* Commit all data before submit */ 1111 1112 return NETDEV_TX_BUSY; 1113 } 1114 1115 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) 1116 { 1117 struct hnae3_handle *h = hns3_get_handle(netdev); 1118 struct sockaddr *mac_addr = p; 1119 int ret; 1120 1121 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) 1122 return -EADDRNOTAVAIL; 1123 1124 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); 1125 if (ret) { 1126 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); 1127 return ret; 1128 } 1129 1130 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); 1131 1132 return 0; 1133 } 1134 1135 static int hns3_nic_set_features(struct net_device *netdev, 1136 netdev_features_t features) 1137 { 1138 netdev_features_t changed = netdev->features ^ features; 1139 struct hns3_nic_priv *priv = netdev_priv(netdev); 1140 struct hnae3_handle *h = priv->ae_handle; 1141 int ret; 1142 1143 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { 1144 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { 1145 priv->ops.fill_desc = hns3_fill_desc_tso; 1146 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; 1147 } else { 1148 priv->ops.fill_desc = hns3_fill_desc; 1149 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; 1150 } 1151 } 1152 1153 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && 1154 h->ae_algo->ops->enable_vlan_filter) { 1155 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 1156 h->ae_algo->ops->enable_vlan_filter(h, true); 1157 else 1158 h->ae_algo->ops->enable_vlan_filter(h, false); 1159 } 1160 1161 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && 1162 h->ae_algo->ops->enable_hw_strip_rxvtag) { 1163 if (features & NETIF_F_HW_VLAN_CTAG_RX) 1164 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true); 1165 else 1166 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false); 1167 1168 if (ret) 1169 return ret; 1170 } 1171 1172 netdev->features = features; 1173 return 0; 1174 } 1175 1176 static void hns3_nic_get_stats64(struct net_device *netdev, 1177 struct rtnl_link_stats64 *stats) 1178 { 1179 struct hns3_nic_priv *priv = netdev_priv(netdev); 1180 int queue_num = priv->ae_handle->kinfo.num_tqps; 1181 struct hnae3_handle *handle = priv->ae_handle; 1182 struct hns3_enet_ring *ring; 1183 unsigned int start; 1184 unsigned int idx; 1185 u64 tx_bytes = 0; 1186 u64 rx_bytes = 0; 1187 u64 tx_pkts = 0; 1188 u64 rx_pkts = 0; 1189 u64 tx_drop = 0; 1190 u64 rx_drop = 0; 1191 1192 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 1193 return; 1194 1195 handle->ae_algo->ops->update_stats(handle, &netdev->stats); 1196 1197 for (idx = 0; idx < queue_num; idx++) { 1198 /* fetch the tx stats */ 1199 ring = priv->ring_data[idx].ring; 1200 do { 1201 start = u64_stats_fetch_begin_irq(&ring->syncp); 1202 tx_bytes += ring->stats.tx_bytes; 1203 tx_pkts += ring->stats.tx_pkts; 1204 tx_drop += 
ring->stats.tx_busy; 1205 tx_drop += ring->stats.sw_err_cnt; 1206 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1207 1208 /* fetch the rx stats */ 1209 ring = priv->ring_data[idx + queue_num].ring; 1210 do { 1211 start = u64_stats_fetch_begin_irq(&ring->syncp); 1212 rx_bytes += ring->stats.rx_bytes; 1213 rx_pkts += ring->stats.rx_pkts; 1214 rx_drop += ring->stats.non_vld_descs; 1215 rx_drop += ring->stats.err_pkt_len; 1216 rx_drop += ring->stats.l2_err; 1217 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1218 } 1219 1220 stats->tx_bytes = tx_bytes; 1221 stats->tx_packets = tx_pkts; 1222 stats->rx_bytes = rx_bytes; 1223 stats->rx_packets = rx_pkts; 1224 1225 stats->rx_errors = netdev->stats.rx_errors; 1226 stats->multicast = netdev->stats.multicast; 1227 stats->rx_length_errors = netdev->stats.rx_length_errors; 1228 stats->rx_crc_errors = netdev->stats.rx_crc_errors; 1229 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 1230 1231 stats->tx_errors = netdev->stats.tx_errors; 1232 stats->rx_dropped = rx_drop + netdev->stats.rx_dropped; 1233 stats->tx_dropped = tx_drop + netdev->stats.tx_dropped; 1234 stats->collisions = netdev->stats.collisions; 1235 stats->rx_over_errors = netdev->stats.rx_over_errors; 1236 stats->rx_frame_errors = netdev->stats.rx_frame_errors; 1237 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; 1238 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; 1239 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; 1240 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; 1241 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; 1242 stats->tx_window_errors = netdev->stats.tx_window_errors; 1243 stats->rx_compressed = netdev->stats.rx_compressed; 1244 stats->tx_compressed = netdev->stats.tx_compressed; 1245 } 1246 1247 static int hns3_setup_tc(struct net_device *netdev, void *type_data) 1248 { 1249 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 1250 struct hnae3_handle *h = hns3_get_handle(netdev); 1251 struct hnae3_knic_private_info *kinfo = &h->kinfo; 1252 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; 1253 u8 tc = mqprio_qopt->qopt.num_tc; 1254 u16 mode = mqprio_qopt->mode; 1255 u8 hw = mqprio_qopt->qopt.hw; 1256 bool if_running; 1257 unsigned int i; 1258 int ret; 1259 1260 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && 1261 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) 1262 return -EOPNOTSUPP; 1263 1264 if (tc > HNAE3_MAX_TC) 1265 return -EINVAL; 1266 1267 if (!netdev) 1268 return -EINVAL; 1269 1270 if_running = netif_running(netdev); 1271 if (if_running) { 1272 hns3_nic_net_stop(netdev); 1273 msleep(100); 1274 } 1275 1276 ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 
1277 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; 1278 if (ret) 1279 goto out; 1280 1281 if (tc <= 1) { 1282 netdev_reset_tc(netdev); 1283 } else { 1284 ret = netdev_set_num_tc(netdev, tc); 1285 if (ret) 1286 goto out; 1287 1288 for (i = 0; i < HNAE3_MAX_TC; i++) { 1289 if (!kinfo->tc_info[i].enable) 1290 continue; 1291 1292 netdev_set_tc_queue(netdev, 1293 kinfo->tc_info[i].tc, 1294 kinfo->tc_info[i].tqp_count, 1295 kinfo->tc_info[i].tqp_offset); 1296 } 1297 } 1298 1299 ret = hns3_nic_set_real_num_queue(netdev); 1300 1301 out: 1302 if (if_running) 1303 hns3_nic_net_open(netdev); 1304 1305 return ret; 1306 } 1307 1308 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 1309 void *type_data) 1310 { 1311 if (type != TC_SETUP_QDISC_MQPRIO) 1312 return -EOPNOTSUPP; 1313 1314 return hns3_setup_tc(dev, type_data); 1315 } 1316 1317 static int hns3_vlan_rx_add_vid(struct net_device *netdev, 1318 __be16 proto, u16 vid) 1319 { 1320 struct hnae3_handle *h = hns3_get_handle(netdev); 1321 struct hns3_nic_priv *priv = netdev_priv(netdev); 1322 int ret = -EIO; 1323 1324 if (h->ae_algo->ops->set_vlan_filter) 1325 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); 1326 1327 if (!ret) 1328 set_bit(vid, priv->active_vlans); 1329 1330 return ret; 1331 } 1332 1333 static int hns3_vlan_rx_kill_vid(struct net_device *netdev, 1334 __be16 proto, u16 vid) 1335 { 1336 struct hnae3_handle *h = hns3_get_handle(netdev); 1337 struct hns3_nic_priv *priv = netdev_priv(netdev); 1338 int ret = -EIO; 1339 1340 if (h->ae_algo->ops->set_vlan_filter) 1341 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); 1342 1343 if (!ret) 1344 clear_bit(vid, priv->active_vlans); 1345 1346 return ret; 1347 } 1348 1349 static void hns3_restore_vlan(struct net_device *netdev) 1350 { 1351 struct hns3_nic_priv *priv = netdev_priv(netdev); 1352 u16 vid; 1353 int ret; 1354 1355 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 1356 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); 1357 if (ret) 1358 netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n", 1359 vid, ret); 1360 } 1361 } 1362 1363 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 1364 u8 qos, __be16 vlan_proto) 1365 { 1366 struct hnae3_handle *h = hns3_get_handle(netdev); 1367 int ret = -EIO; 1368 1369 if (h->ae_algo->ops->set_vf_vlan_filter) 1370 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, 1371 qos, vlan_proto); 1372 1373 return ret; 1374 } 1375 1376 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) 1377 { 1378 struct hnae3_handle *h = hns3_get_handle(netdev); 1379 bool if_running = netif_running(netdev); 1380 int ret; 1381 1382 if (!h->ae_algo->ops->set_mtu) 1383 return -EOPNOTSUPP; 1384 1385 /* if this was called with netdev up then bring netdevice down */ 1386 if (if_running) { 1387 (void)hns3_nic_net_stop(netdev); 1388 msleep(100); 1389 } 1390 1391 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 1392 if (ret) { 1393 netdev_err(netdev, "failed to change MTU in hardware %d\n", 1394 ret); 1395 return ret; 1396 } 1397 1398 netdev->mtu = new_mtu; 1399 1400 /* if the netdev was running earlier, bring it up again */ 1401 if (if_running && hns3_nic_net_open(netdev)) 1402 ret = -EINVAL; 1403 1404 return ret; 1405 } 1406 1407 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) 1408 { 1409 struct hns3_nic_priv *priv = netdev_priv(ndev); 1410 struct hns3_enet_ring *tx_ring = NULL; 1411 int timeout_queue = 0; 1412 int hw_head, hw_tail; 1413 int i; 
1414 1415 /* Find the stopped queue the same way the stack does */ 1416 for (i = 0; i < ndev->real_num_tx_queues; i++) { 1417 struct netdev_queue *q; 1418 unsigned long trans_start; 1419 1420 q = netdev_get_tx_queue(ndev, i); 1421 trans_start = q->trans_start; 1422 if (netif_xmit_stopped(q) && 1423 time_after(jiffies, 1424 (trans_start + ndev->watchdog_timeo))) { 1425 timeout_queue = i; 1426 break; 1427 } 1428 } 1429 1430 if (i == ndev->num_tx_queues) { 1431 netdev_info(ndev, 1432 "no netdev TX timeout queue found, timeout count: %llu\n", 1433 priv->tx_timeout_count); 1434 return false; 1435 } 1436 1437 tx_ring = priv->ring_data[timeout_queue].ring; 1438 1439 hw_head = readl_relaxed(tx_ring->tqp->io_base + 1440 HNS3_RING_TX_RING_HEAD_REG); 1441 hw_tail = readl_relaxed(tx_ring->tqp->io_base + 1442 HNS3_RING_TX_RING_TAIL_REG); 1443 netdev_info(ndev, 1444 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n", 1445 priv->tx_timeout_count, 1446 timeout_queue, 1447 tx_ring->next_to_use, 1448 tx_ring->next_to_clean, 1449 hw_head, 1450 hw_tail, 1451 readl(tx_ring->tqp_vector->mask_addr)); 1452 1453 return true; 1454 } 1455 1456 static void hns3_nic_net_timeout(struct net_device *ndev) 1457 { 1458 struct hns3_nic_priv *priv = netdev_priv(ndev); 1459 struct hnae3_handle *h = priv->ae_handle; 1460 1461 if (!hns3_get_tx_timeo_queue_info(ndev)) 1462 return; 1463 1464 priv->tx_timeout_count++; 1465 1466 if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo))) 1467 return; 1468 1469 /* request the reset */ 1470 if (h->ae_algo->ops->reset_event) 1471 h->ae_algo->ops->reset_event(h); 1472 } 1473 1474 static const struct net_device_ops hns3_nic_netdev_ops = { 1475 .ndo_open = hns3_nic_net_open, 1476 .ndo_stop = hns3_nic_net_stop, 1477 .ndo_start_xmit = hns3_nic_net_xmit, 1478 .ndo_tx_timeout = hns3_nic_net_timeout, 1479 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 1480 .ndo_change_mtu = hns3_nic_change_mtu, 1481 .ndo_set_features = hns3_nic_set_features, 1482 .ndo_get_stats64 = hns3_nic_get_stats64, 1483 .ndo_setup_tc = hns3_nic_setup_tc, 1484 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 1485 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 1486 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 1487 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 1488 }; 1489 1490 static bool hns3_is_phys_func(struct pci_dev *pdev) 1491 { 1492 u32 dev_id = pdev->device; 1493 1494 switch (dev_id) { 1495 case HNAE3_DEV_ID_GE: 1496 case HNAE3_DEV_ID_25GE: 1497 case HNAE3_DEV_ID_25GE_RDMA: 1498 case HNAE3_DEV_ID_25GE_RDMA_MACSEC: 1499 case HNAE3_DEV_ID_50GE_RDMA: 1500 case HNAE3_DEV_ID_50GE_RDMA_MACSEC: 1501 case HNAE3_DEV_ID_100G_RDMA_MACSEC: 1502 return true; 1503 case HNAE3_DEV_ID_100G_VF: 1504 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: 1505 return false; 1506 default: 1507 dev_warn(&pdev->dev, "un-recognized pci device-id %d", 1508 dev_id); 1509 } 1510 1511 return false; 1512 } 1513 1514 static void hns3_disable_sriov(struct pci_dev *pdev) 1515 { 1516 /* If our VFs are assigned we cannot shut down SR-IOV 1517 * without causing issues, so just leave the hardware 1518 * available but disabled 1519 */ 1520 if (pci_vfs_assigned(pdev)) { 1521 dev_warn(&pdev->dev, 1522 "disabling driver while VFs are assigned\n"); 1523 return; 1524 } 1525 1526 pci_disable_sriov(pdev); 1527 } 1528 1529 /* hns3_probe - Device initialization routine 1530 * @pdev: PCI device information struct 1531 * @ent: entry in hns3_pci_tbl 1532 * 1533 * hns3_probe initializes a PF identified by a pci_dev 
structure. 1534 * The OS initialization, configuring of the PF private structure, 1535 * and a hardware reset occur. 1536 * 1537 * Returns 0 on success, negative on failure 1538 */ 1539 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1540 { 1541 struct hnae3_ae_dev *ae_dev; 1542 int ret; 1543 1544 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), 1545 GFP_KERNEL); 1546 if (!ae_dev) { 1547 ret = -ENOMEM; 1548 return ret; 1549 } 1550 1551 ae_dev->pdev = pdev; 1552 ae_dev->flag = ent->driver_data; 1553 ae_dev->dev_type = HNAE3_DEV_KNIC; 1554 pci_set_drvdata(pdev, ae_dev); 1555 1556 hnae3_register_ae_dev(ae_dev); 1557 1558 return 0; 1559 } 1560 1561 /* hns3_remove - Device removal routine 1562 * @pdev: PCI device information struct 1563 */ 1564 static void hns3_remove(struct pci_dev *pdev) 1565 { 1566 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1567 1568 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) 1569 hns3_disable_sriov(pdev); 1570 1571 hnae3_unregister_ae_dev(ae_dev); 1572 } 1573 1574 /** 1575 * hns3_pci_sriov_configure 1576 * @pdev: pointer to a pci_dev structure 1577 * @num_vfs: number of VFs to allocate 1578 * 1579 * Enable or change the number of VFs. Called when the user updates the number 1580 * of VFs in sysfs. 1581 **/ 1582 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 1583 { 1584 int ret; 1585 1586 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { 1587 dev_warn(&pdev->dev, "Can not config SRIOV\n"); 1588 return -EINVAL; 1589 } 1590 1591 if (num_vfs) { 1592 ret = pci_enable_sriov(pdev, num_vfs); 1593 if (ret) 1594 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); 1595 else 1596 return num_vfs; 1597 } else if (!pci_vfs_assigned(pdev)) { 1598 pci_disable_sriov(pdev); 1599 } else { 1600 dev_warn(&pdev->dev, 1601 "Unable to free VFs because some are assigned to VMs.\n"); 1602 } 1603 1604 return 0; 1605 } 1606 1607 static struct pci_driver hns3_driver = { 1608 .name = hns3_driver_name, 1609 .id_table = hns3_pci_tbl, 1610 .probe = hns3_probe, 1611 .remove = hns3_remove, 1612 .sriov_configure = hns3_pci_sriov_configure, 1613 }; 1614 1615 /* set default feature to hns3 */ 1616 static void hns3_set_default_feature(struct net_device *netdev) 1617 { 1618 netdev->priv_flags |= IFF_UNICAST_FLT; 1619 1620 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1621 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1622 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1623 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1624 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1625 1626 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 1627 1628 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 1629 1630 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1631 NETIF_F_HW_VLAN_CTAG_FILTER | 1632 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 1633 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1634 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1635 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1636 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1637 1638 netdev->vlan_features |= 1639 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | 1640 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | 1641 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1642 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1643 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1644 1645 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1646 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 1647 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1648 
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1649 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1650 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1651 } 1652 1653 static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 1654 struct hns3_desc_cb *cb) 1655 { 1656 unsigned int order = hnae_page_order(ring); 1657 struct page *p; 1658 1659 p = dev_alloc_pages(order); 1660 if (!p) 1661 return -ENOMEM; 1662 1663 cb->priv = p; 1664 cb->page_offset = 0; 1665 cb->reuse_flag = 0; 1666 cb->buf = page_address(p); 1667 cb->length = hnae_page_size(ring); 1668 cb->type = DESC_TYPE_PAGE; 1669 1670 return 0; 1671 } 1672 1673 static void hns3_free_buffer(struct hns3_enet_ring *ring, 1674 struct hns3_desc_cb *cb) 1675 { 1676 if (cb->type == DESC_TYPE_SKB) 1677 dev_kfree_skb_any((struct sk_buff *)cb->priv); 1678 else if (!HNAE3_IS_TX_RING(ring)) 1679 put_page((struct page *)cb->priv); 1680 memset(cb, 0, sizeof(*cb)); 1681 } 1682 1683 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) 1684 { 1685 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 1686 cb->length, ring_to_dma_dir(ring)); 1687 1688 if (dma_mapping_error(ring_to_dev(ring), cb->dma)) 1689 return -EIO; 1690 1691 return 0; 1692 } 1693 1694 static void hns3_unmap_buffer(struct hns3_enet_ring *ring, 1695 struct hns3_desc_cb *cb) 1696 { 1697 if (cb->type == DESC_TYPE_SKB) 1698 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 1699 ring_to_dma_dir(ring)); 1700 else 1701 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 1702 ring_to_dma_dir(ring)); 1703 } 1704 1705 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) 1706 { 1707 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 1708 ring->desc[i].addr = 0; 1709 } 1710 1711 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) 1712 { 1713 struct hns3_desc_cb *cb = &ring->desc_cb[i]; 1714 1715 if (!ring->desc_cb[i].dma) 1716 return; 1717 1718 hns3_buffer_detach(ring, i); 1719 hns3_free_buffer(ring, cb); 1720 } 1721 1722 static void hns3_free_buffers(struct hns3_enet_ring *ring) 1723 { 1724 int i; 1725 1726 for (i = 0; i < ring->desc_num; i++) 1727 hns3_free_buffer_detach(ring, i); 1728 } 1729 1730 /* free desc along with its attached buffer */ 1731 static void hns3_free_desc(struct hns3_enet_ring *ring) 1732 { 1733 hns3_free_buffers(ring); 1734 1735 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, 1736 ring->desc_num * sizeof(ring->desc[0]), 1737 DMA_BIDIRECTIONAL); 1738 ring->desc_dma_addr = 0; 1739 kfree(ring->desc); 1740 ring->desc = NULL; 1741 } 1742 1743 static int hns3_alloc_desc(struct hns3_enet_ring *ring) 1744 { 1745 int size = ring->desc_num * sizeof(ring->desc[0]); 1746 1747 ring->desc = kzalloc(size, GFP_KERNEL); 1748 if (!ring->desc) 1749 return -ENOMEM; 1750 1751 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc, 1752 size, DMA_BIDIRECTIONAL); 1753 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) { 1754 ring->desc_dma_addr = 0; 1755 kfree(ring->desc); 1756 ring->desc = NULL; 1757 return -ENOMEM; 1758 } 1759 1760 return 0; 1761 } 1762 1763 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, 1764 struct hns3_desc_cb *cb) 1765 { 1766 int ret; 1767 1768 ret = hns3_alloc_buffer(ring, cb); 1769 if (ret) 1770 goto out; 1771 1772 ret = hns3_map_buffer(ring, cb); 1773 if (ret) 1774 goto out_with_buf; 1775 1776 return 0; 1777 1778 out_with_buf: 1779 hns3_free_buffer(ring, cb); 1780 out: 1781 return ret; 1782 } 1783 1784 static int hns3_alloc_buffer_attach(struct hns3_enet_ring 
*ring, int i)
{
	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);

	if (ret)
		return ret;

	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);

	return 0;
}

/* Allocate memory for raw pkg, and map with dma */
static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hns3_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hns3_free_buffer_detach(ring, j);
	return ret;
}

/* detach an in-use buffer and replace it with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
				struct hns3_desc_cb *res_cb)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc_cb[i] = *res_cb;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
}

static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
		+ ring->desc_cb[i].page_offset);
}

static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
				      int *pkts)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, after hnae_free_buffer_detach */
	hns3_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	return u > c ?
(h > c && h <= u) : (h > c || h <= u); 1853 } 1854 1855 bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) 1856 { 1857 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 1858 struct netdev_queue *dev_queue; 1859 int bytes, pkts; 1860 int head; 1861 1862 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); 1863 rmb(); /* Make sure head is ready before touch any data */ 1864 1865 if (is_ring_empty(ring) || head == ring->next_to_clean) 1866 return true; /* no data to poll */ 1867 1868 if (!is_valid_clean_head(ring, head)) { 1869 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, 1870 ring->next_to_use, ring->next_to_clean); 1871 1872 u64_stats_update_begin(&ring->syncp); 1873 ring->stats.io_err_cnt++; 1874 u64_stats_update_end(&ring->syncp); 1875 return true; 1876 } 1877 1878 bytes = 0; 1879 pkts = 0; 1880 while (head != ring->next_to_clean && budget) { 1881 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); 1882 /* Issue prefetch for next Tx descriptor */ 1883 prefetch(&ring->desc_cb[ring->next_to_clean]); 1884 budget--; 1885 } 1886 1887 ring->tqp_vector->tx_group.total_bytes += bytes; 1888 ring->tqp_vector->tx_group.total_packets += pkts; 1889 1890 u64_stats_update_begin(&ring->syncp); 1891 ring->stats.tx_bytes += bytes; 1892 ring->stats.tx_pkts += pkts; 1893 u64_stats_update_end(&ring->syncp); 1894 1895 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 1896 netdev_tx_completed_queue(dev_queue, pkts, bytes); 1897 1898 if (unlikely(pkts && netif_carrier_ok(netdev) && 1899 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { 1900 /* Make sure that anybody stopping the queue after this 1901 * sees the new next_to_clean. 1902 */ 1903 smp_mb(); 1904 if (netif_tx_queue_stopped(dev_queue)) { 1905 netif_tx_wake_queue(dev_queue); 1906 ring->stats.restart_queue++; 1907 } 1908 } 1909 1910 return !!budget; 1911 } 1912 1913 static int hns3_desc_unused(struct hns3_enet_ring *ring) 1914 { 1915 int ntc = ring->next_to_clean; 1916 int ntu = ring->next_to_use; 1917 1918 return ((ntc >= ntu) ? 
0 : ring->desc_num) + ntc - ntu;
}

static void
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
{
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc_cb res_cbs;
	int i, ret;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.reuse_pg_cnt++;
			u64_stats_update_end(&ring->syncp);

			hns3_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);

				netdev_err(ring->tqp->handle->kinfo.netdev,
					   "hnae reserve buffer map failed.\n");
				break;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* Make sure all data has been written before submit */
	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}

static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
				struct hns3_enet_ring *ring, int pull_len,
				struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		   hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	truesize = hnae_buf_size(ring);

	if (!twobufs)
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);

	/* Avoid re-using remote pages; by default the page is not reused */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* If we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* Flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* Move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* Bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}

static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
			     struct hns3_desc *desc)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int l3_type, l4_type;
	u32 bd_base_info;
	int ol4_type;
	u32 l234info;

	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if hardware has done checksum */
	if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
		return;

	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
		netdev_err(netdev, "L3/L4 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
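		/* count L3/L4 checksum errors reported by hardware */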
ring->stats.l3l4_csum_err++; 2037 u64_stats_update_end(&ring->syncp); 2038 2039 return; 2040 } 2041 2042 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, 2043 HNS3_RXD_L3ID_S); 2044 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, 2045 HNS3_RXD_L4ID_S); 2046 2047 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); 2048 switch (ol4_type) { 2049 case HNS3_OL4_TYPE_MAC_IN_UDP: 2050 case HNS3_OL4_TYPE_NVGRE: 2051 skb->csum_level = 1; 2052 case HNS3_OL4_TYPE_NO_TUN: 2053 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ 2054 if (l3_type == HNS3_L3_TYPE_IPV4 || 2055 (l3_type == HNS3_L3_TYPE_IPV6 && 2056 (l4_type == HNS3_L4_TYPE_UDP || 2057 l4_type == HNS3_L4_TYPE_TCP || 2058 l4_type == HNS3_L4_TYPE_SCTP))) 2059 skb->ip_summed = CHECKSUM_UNNECESSARY; 2060 break; 2061 } 2062 } 2063 2064 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) 2065 { 2066 napi_gro_receive(&ring->tqp_vector->napi, skb); 2067 } 2068 2069 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, 2070 struct sk_buff **out_skb, int *out_bnum) 2071 { 2072 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2073 struct hns3_desc_cb *desc_cb; 2074 struct hns3_desc *desc; 2075 struct sk_buff *skb; 2076 unsigned char *va; 2077 u32 bd_base_info; 2078 int pull_len; 2079 u32 l234info; 2080 int length; 2081 int bnum; 2082 2083 desc = &ring->desc[ring->next_to_clean]; 2084 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2085 2086 prefetch(desc); 2087 2088 length = le16_to_cpu(desc->rx.pkt_len); 2089 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2090 l234info = le32_to_cpu(desc->rx.l234_info); 2091 2092 /* Check valid BD */ 2093 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) 2094 return -EFAULT; 2095 2096 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; 2097 2098 /* Prefetch first cache line of first page 2099 * Idea is to cache few bytes of the header of the packet. Our L1 Cache 2100 * line size is 64B so need to prefetch twice to make it 128B. But in 2101 * actual we can have greater size of caches with 128B Level 1 cache 2102 * lines. In such a case, single fetch would suffice to cache in the 2103 * relevant part of the header. 2104 */ 2105 prefetch(va); 2106 #if L1_CACHE_BYTES < 128 2107 prefetch(va + L1_CACHE_BYTES); 2108 #endif 2109 2110 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, 2111 HNS3_RX_HEAD_SIZE); 2112 if (unlikely(!skb)) { 2113 netdev_err(netdev, "alloc rx skb fail\n"); 2114 2115 u64_stats_update_begin(&ring->syncp); 2116 ring->stats.sw_err_cnt++; 2117 u64_stats_update_end(&ring->syncp); 2118 2119 return -ENOMEM; 2120 } 2121 2122 prefetchw(skb->data); 2123 2124 /* Based on hw strategy, the tag offloaded will be stored at 2125 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 2126 * in one layer tag case. 
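 * Hence the code below reads ot_vlan_tag first and falls back to vlan_tag when no VLAN ID is present in it.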
2127 */ 2128 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 2129 u16 vlan_tag; 2130 2131 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2132 if (!(vlan_tag & VLAN_VID_MASK)) 2133 vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2134 if (vlan_tag & VLAN_VID_MASK) 2135 __vlan_hwaccel_put_tag(skb, 2136 htons(ETH_P_8021Q), 2137 vlan_tag); 2138 } 2139 2140 bnum = 1; 2141 if (length <= HNS3_RX_HEAD_SIZE) { 2142 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 2143 2144 /* We can reuse buffer as-is, just make sure it is local */ 2145 if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) 2146 desc_cb->reuse_flag = 1; 2147 else /* This page cannot be reused so discard it */ 2148 put_page(desc_cb->priv); 2149 2150 ring_ptr_move_fw(ring, next_to_clean); 2151 } else { 2152 u64_stats_update_begin(&ring->syncp); 2153 ring->stats.seg_pkt_cnt++; 2154 u64_stats_update_end(&ring->syncp); 2155 2156 pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); 2157 2158 memcpy(__skb_put(skb, pull_len), va, 2159 ALIGN(pull_len, sizeof(long))); 2160 2161 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); 2162 ring_ptr_move_fw(ring, next_to_clean); 2163 2164 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { 2165 desc = &ring->desc[ring->next_to_clean]; 2166 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2167 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2168 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); 2169 ring_ptr_move_fw(ring, next_to_clean); 2170 bnum++; 2171 } 2172 } 2173 2174 *out_bnum = bnum; 2175 2176 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { 2177 netdev_err(netdev, "no valid bd,%016llx,%016llx\n", 2178 ((u64 *)desc)[0], ((u64 *)desc)[1]); 2179 u64_stats_update_begin(&ring->syncp); 2180 ring->stats.non_vld_descs++; 2181 u64_stats_update_end(&ring->syncp); 2182 2183 dev_kfree_skb_any(skb); 2184 return -EINVAL; 2185 } 2186 2187 if (unlikely((!desc->rx.pkt_len) || 2188 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { 2189 netdev_err(netdev, "truncated pkt\n"); 2190 u64_stats_update_begin(&ring->syncp); 2191 ring->stats.err_pkt_len++; 2192 u64_stats_update_end(&ring->syncp); 2193 2194 dev_kfree_skb_any(skb); 2195 return -EFAULT; 2196 } 2197 2198 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { 2199 netdev_err(netdev, "L2 error pkt\n"); 2200 u64_stats_update_begin(&ring->syncp); 2201 ring->stats.l2_err++; 2202 u64_stats_update_end(&ring->syncp); 2203 2204 dev_kfree_skb_any(skb); 2205 return -EFAULT; 2206 } 2207 2208 u64_stats_update_begin(&ring->syncp); 2209 ring->stats.rx_pkts++; 2210 ring->stats.rx_bytes += skb->len; 2211 u64_stats_update_end(&ring->syncp); 2212 2213 ring->tqp_vector->rx_group.total_bytes += skb->len; 2214 2215 hns3_rx_checksum(ring, skb, desc); 2216 return 0; 2217 } 2218 2219 int hns3_clean_rx_ring( 2220 struct hns3_enet_ring *ring, int budget, 2221 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) 2222 { 2223 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 2224 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2225 int recv_pkts, recv_bds, clean_count, err; 2226 int unused_count = hns3_desc_unused(ring); 2227 struct sk_buff *skb = NULL; 2228 int num, bnum = 0; 2229 2230 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); 2231 rmb(); /* Make sure num taken effect before the other data is touched */ 2232 2233 recv_pkts = 0, recv_bds = 0, clean_count = 0; 2234 num -= unused_count; 2235 2236 while (recv_pkts < budget && recv_bds < num) { 2237 /* Reuse or realloc buffers */ 2238 if (clean_count + unused_count >= 
RCB_NOF_ALLOC_RX_BUFF_ONCE) { 2239 hns3_nic_alloc_rx_buffers(ring, 2240 clean_count + unused_count); 2241 clean_count = 0; 2242 unused_count = hns3_desc_unused(ring); 2243 } 2244 2245 /* Poll one pkt */ 2246 err = hns3_handle_rx_bd(ring, &skb, &bnum); 2247 if (unlikely(!skb)) /* This failure cannot be recovered from */ 2248 goto out; 2249 2250 recv_bds += bnum; 2251 clean_count += bnum; 2252 if (unlikely(err)) { /* Skip this erroneous packet */ 2253 recv_pkts++; 2254 continue; 2255 } 2256 2257 /* Hand the packet up to the network stack */ 2258 skb->protocol = eth_type_trans(skb, netdev); 2259 rx_fn(ring, skb); 2260 2261 recv_pkts++; 2262 } 2263 2264 out: 2265 /* Make sure all data has been written before submitting */ 2266 if (clean_count + unused_count > 0) 2267 hns3_nic_alloc_rx_buffers(ring, 2268 clean_count + unused_count); 2269 2270 return recv_pkts; 2271 } 2272 2273 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) 2274 { 2275 struct hns3_enet_tqp_vector *tqp_vector = 2276 ring_group->ring->tqp_vector; 2277 enum hns3_flow_level_range new_flow_level; 2278 int packets_per_msecs; 2279 int bytes_per_msecs; 2280 u32 time_passed_ms; 2281 u16 new_int_gl; 2282 2283 if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies) 2284 return false; 2285 2286 if (ring_group->total_packets == 0) { 2287 ring_group->coal.int_gl = HNS3_INT_GL_50K; 2288 ring_group->coal.flow_level = HNS3_FLOW_LOW; 2289 return true; 2290 } 2291 2292 /* Simple throttle rate management 2293 * 0-10MB/s lower (50000 ints/s) 2294 * 10-20MB/s middle (20000 ints/s) 2295 * 20-1249MB/s high (18000 ints/s) 2296 * > 40000pps ultra (8000 ints/s) 2297 */ 2298 new_flow_level = ring_group->coal.flow_level; 2299 new_int_gl = ring_group->coal.int_gl; 2300 time_passed_ms = 2301 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); 2302 2303 if (!time_passed_ms) 2304 return false; 2305 2306 do_div(ring_group->total_packets, time_passed_ms); 2307 packets_per_msecs = ring_group->total_packets; 2308 2309 do_div(ring_group->total_bytes, time_passed_ms); 2310 bytes_per_msecs = ring_group->total_bytes; 2311 2312 #define HNS3_RX_LOW_BYTE_RATE 10000 2313 #define HNS3_RX_MID_BYTE_RATE 20000 2314 2315 switch (new_flow_level) { 2316 case HNS3_FLOW_LOW: 2317 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) 2318 new_flow_level = HNS3_FLOW_MID; 2319 break; 2320 case HNS3_FLOW_MID: 2321 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) 2322 new_flow_level = HNS3_FLOW_HIGH; 2323 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) 2324 new_flow_level = HNS3_FLOW_LOW; 2325 break; 2326 case HNS3_FLOW_HIGH: 2327 case HNS3_FLOW_ULTRA: 2328 default: 2329 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) 2330 new_flow_level = HNS3_FLOW_MID; 2331 break; 2332 } 2333 2334 #define HNS3_RX_ULTRA_PACKET_RATE 40 2335 2336 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && 2337 &tqp_vector->rx_group == ring_group) 2338 new_flow_level = HNS3_FLOW_ULTRA; 2339 2340 switch (new_flow_level) { 2341 case HNS3_FLOW_LOW: 2342 new_int_gl = HNS3_INT_GL_50K; 2343 break; 2344 case HNS3_FLOW_MID: 2345 new_int_gl = HNS3_INT_GL_20K; 2346 break; 2347 case HNS3_FLOW_HIGH: 2348 new_int_gl = HNS3_INT_GL_18K; 2349 break; 2350 case HNS3_FLOW_ULTRA: 2351 new_int_gl = HNS3_INT_GL_8K; 2352 break; 2353 default: 2354 break; 2355 } 2356 2357 ring_group->total_bytes = 0; 2358 ring_group->total_packets = 0; 2359 ring_group->coal.flow_level = new_flow_level; 2360 if (new_int_gl != ring_group->coal.int_gl) { 2361 ring_group->coal.int_gl = new_int_gl; 2362 return true; 2363 } 2364 return false; 2365 } 2366 2367 static void
hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) 2368 { 2369 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 2370 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 2371 bool rx_update, tx_update; 2372 2373 if (tqp_vector->int_adapt_down > 0) { 2374 tqp_vector->int_adapt_down--; 2375 return; 2376 } 2377 2378 if (rx_group->coal.gl_adapt_enable) { 2379 rx_update = hns3_get_new_int_gl(rx_group); 2380 if (rx_update) 2381 hns3_set_vector_coalesce_rx_gl(tqp_vector, 2382 rx_group->coal.int_gl); 2383 } 2384 2385 if (tx_group->coal.gl_adapt_enable) { 2386 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group); 2387 if (tx_update) 2388 hns3_set_vector_coalesce_tx_gl(tqp_vector, 2389 tx_group->coal.int_gl); 2390 } 2391 2392 tqp_vector->last_jiffies = jiffies; 2393 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; 2394 } 2395 2396 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 2397 { 2398 struct hns3_enet_ring *ring; 2399 int rx_pkt_total = 0; 2400 2401 struct hns3_enet_tqp_vector *tqp_vector = 2402 container_of(napi, struct hns3_enet_tqp_vector, napi); 2403 bool clean_complete = true; 2404 int rx_budget; 2405 2406 /* Since the actual Tx work is minimal, we can give the Tx a larger 2407 * budget and be more aggressive about cleaning up the Tx descriptors. 2408 */ 2409 hns3_for_each_ring(ring, tqp_vector->tx_group) { 2410 if (!hns3_clean_tx_ring(ring, budget)) 2411 clean_complete = false; 2412 } 2413 2414 /* make sure rx ring budget not smaller than 1 */ 2415 rx_budget = max(budget / tqp_vector->num_tqps, 1); 2416 2417 hns3_for_each_ring(ring, tqp_vector->rx_group) { 2418 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 2419 hns3_rx_skb); 2420 2421 if (rx_cleaned >= rx_budget) 2422 clean_complete = false; 2423 2424 rx_pkt_total += rx_cleaned; 2425 } 2426 2427 tqp_vector->rx_group.total_packets += rx_pkt_total; 2428 2429 if (!clean_complete) 2430 return budget; 2431 2432 napi_complete(napi); 2433 hns3_update_new_int_gl(tqp_vector); 2434 hns3_mask_vector_irq(tqp_vector, 1); 2435 2436 return rx_pkt_total; 2437 } 2438 2439 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 2440 struct hnae3_ring_chain_node *head) 2441 { 2442 struct pci_dev *pdev = tqp_vector->handle->pdev; 2443 struct hnae3_ring_chain_node *cur_chain = head; 2444 struct hnae3_ring_chain_node *chain; 2445 struct hns3_enet_ring *tx_ring; 2446 struct hns3_enet_ring *rx_ring; 2447 2448 tx_ring = tqp_vector->tx_group.ring; 2449 if (tx_ring) { 2450 cur_chain->tqp_index = tx_ring->tqp->tqp_index; 2451 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 2452 HNAE3_RING_TYPE_TX); 2453 hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2454 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); 2455 2456 cur_chain->next = NULL; 2457 2458 while (tx_ring->next) { 2459 tx_ring = tx_ring->next; 2460 2461 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), 2462 GFP_KERNEL); 2463 if (!chain) 2464 return -ENOMEM; 2465 2466 cur_chain->next = chain; 2467 chain->tqp_index = tx_ring->tqp->tqp_index; 2468 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, 2469 HNAE3_RING_TYPE_TX); 2470 hnae_set_field(chain->int_gl_idx, 2471 HNAE3_RING_GL_IDX_M, 2472 HNAE3_RING_GL_IDX_S, 2473 HNAE3_RING_GL_TX); 2474 2475 cur_chain = chain; 2476 } 2477 } 2478 2479 rx_ring = tqp_vector->rx_group.ring; 2480 if (!tx_ring && rx_ring) { 2481 cur_chain->next = NULL; 2482 cur_chain->tqp_index = rx_ring->tqp->tqp_index; 2483 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 2484 HNAE3_RING_TYPE_RX); 2485 
hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2486 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 2487 2488 rx_ring = rx_ring->next; 2489 } 2490 2491 while (rx_ring) { 2492 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 2493 if (!chain) 2494 return -ENOMEM; 2495 2496 cur_chain->next = chain; 2497 chain->tqp_index = rx_ring->tqp->tqp_index; 2498 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, 2499 HNAE3_RING_TYPE_RX); 2500 hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2501 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 2502 2503 cur_chain = chain; 2504 2505 rx_ring = rx_ring->next; 2506 } 2507 2508 return 0; 2509 } 2510 2511 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 2512 struct hnae3_ring_chain_node *head) 2513 { 2514 struct pci_dev *pdev = tqp_vector->handle->pdev; 2515 struct hnae3_ring_chain_node *chain_tmp, *chain; 2516 2517 chain = head->next; 2518 2519 while (chain) { 2520 chain_tmp = chain->next; 2521 devm_kfree(&pdev->dev, chain); 2522 chain = chain_tmp; 2523 } 2524 } 2525 2526 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 2527 struct hns3_enet_ring *ring) 2528 { 2529 ring->next = group->ring; 2530 group->ring = ring; 2531 2532 group->count++; 2533 } 2534 2535 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 2536 { 2537 struct hnae3_ring_chain_node vector_ring_chain; 2538 struct hnae3_handle *h = priv->ae_handle; 2539 struct hns3_enet_tqp_vector *tqp_vector; 2540 int ret = 0; 2541 u16 i; 2542 2543 for (i = 0; i < priv->vector_num; i++) { 2544 tqp_vector = &priv->tqp_vector[i]; 2545 hns3_vector_gl_rl_init_hw(tqp_vector, priv); 2546 tqp_vector->num_tqps = 0; 2547 } 2548 2549 for (i = 0; i < h->kinfo.num_tqps; i++) { 2550 u16 vector_i = i % priv->vector_num; 2551 u16 tqp_num = h->kinfo.num_tqps; 2552 2553 tqp_vector = &priv->tqp_vector[vector_i]; 2554 2555 hns3_add_ring_to_group(&tqp_vector->tx_group, 2556 priv->ring_data[i].ring); 2557 2558 hns3_add_ring_to_group(&tqp_vector->rx_group, 2559 priv->ring_data[i + tqp_num].ring); 2560 2561 priv->ring_data[i].ring->tqp_vector = tqp_vector; 2562 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; 2563 tqp_vector->num_tqps++; 2564 } 2565 2566 for (i = 0; i < priv->vector_num; i++) { 2567 tqp_vector = &priv->tqp_vector[i]; 2568 2569 tqp_vector->rx_group.total_bytes = 0; 2570 tqp_vector->rx_group.total_packets = 0; 2571 tqp_vector->tx_group.total_bytes = 0; 2572 tqp_vector->tx_group.total_packets = 0; 2573 tqp_vector->handle = h; 2574 2575 ret = hns3_get_vector_ring_chain(tqp_vector, 2576 &vector_ring_chain); 2577 if (ret) 2578 return ret; 2579 2580 ret = h->ae_algo->ops->map_ring_to_vector(h, 2581 tqp_vector->vector_irq, &vector_ring_chain); 2582 2583 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 2584 2585 if (ret) 2586 return ret; 2587 2588 netif_napi_add(priv->netdev, &tqp_vector->napi, 2589 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 2590 } 2591 2592 return 0; 2593 } 2594 2595 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 2596 { 2597 struct hnae3_handle *h = priv->ae_handle; 2598 struct hns3_enet_tqp_vector *tqp_vector; 2599 struct hnae3_vector_info *vector; 2600 struct pci_dev *pdev = h->pdev; 2601 u16 tqp_num = h->kinfo.num_tqps; 2602 u16 vector_num; 2603 int ret = 0; 2604 u16 i; 2605 2606 /* RSS size, cpu online and vector_num should be the same */ 2607 /* Should consider 2p/4p later */ 2608 vector_num = min_t(u16, num_online_cpus(), tqp_num); 2609 vector = devm_kcalloc(&pdev->dev, vector_num, 
sizeof(*vector), 2610 GFP_KERNEL); 2611 if (!vector) 2612 return -ENOMEM; 2613 2614 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 2615 2616 priv->vector_num = vector_num; 2617 priv->tqp_vector = (struct hns3_enet_tqp_vector *) 2618 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 2619 GFP_KERNEL); 2620 if (!priv->tqp_vector) { 2621 ret = -ENOMEM; 2622 goto out; 2623 } 2624 2625 for (i = 0; i < priv->vector_num; i++) { 2626 tqp_vector = &priv->tqp_vector[i]; 2627 tqp_vector->idx = i; 2628 tqp_vector->mask_addr = vector[i].io_addr; 2629 tqp_vector->vector_irq = vector[i].vector; 2630 hns3_vector_gl_rl_init(tqp_vector, priv); 2631 } 2632 2633 out: 2634 devm_kfree(&pdev->dev, vector); 2635 return ret; 2636 } 2637 2638 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) 2639 { 2640 group->ring = NULL; 2641 group->count = 0; 2642 } 2643 2644 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 2645 { 2646 struct hnae3_ring_chain_node vector_ring_chain; 2647 struct hnae3_handle *h = priv->ae_handle; 2648 struct hns3_enet_tqp_vector *tqp_vector; 2649 int i, ret; 2650 2651 for (i = 0; i < priv->vector_num; i++) { 2652 tqp_vector = &priv->tqp_vector[i]; 2653 2654 ret = hns3_get_vector_ring_chain(tqp_vector, 2655 &vector_ring_chain); 2656 if (ret) 2657 return ret; 2658 2659 ret = h->ae_algo->ops->unmap_ring_from_vector(h, 2660 tqp_vector->vector_irq, &vector_ring_chain); 2661 if (ret) 2662 return ret; 2663 2664 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 2665 if (ret) 2666 return ret; 2667 2668 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 2669 2670 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { 2671 (void)irq_set_affinity_hint( 2672 priv->tqp_vector[i].vector_irq, 2673 NULL); 2674 free_irq(priv->tqp_vector[i].vector_irq, 2675 &priv->tqp_vector[i]); 2676 } 2677 2678 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; 2679 hns3_clear_ring_group(&tqp_vector->rx_group); 2680 hns3_clear_ring_group(&tqp_vector->tx_group); 2681 netif_napi_del(&priv->tqp_vector[i].napi); 2682 } 2683 2684 return 0; 2685 } 2686 2687 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 2688 { 2689 struct hnae3_handle *h = priv->ae_handle; 2690 struct pci_dev *pdev = h->pdev; 2691 int i, ret; 2692 2693 for (i = 0; i < priv->vector_num; i++) { 2694 struct hns3_enet_tqp_vector *tqp_vector; 2695 2696 tqp_vector = &priv->tqp_vector[i]; 2697 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 2698 if (ret) 2699 return ret; 2700 } 2701 2702 devm_kfree(&pdev->dev, priv->tqp_vector); 2703 return 0; 2704 } 2705 2706 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 2707 int ring_type) 2708 { 2709 struct hns3_nic_ring_data *ring_data = priv->ring_data; 2710 int queue_num = priv->ae_handle->kinfo.num_tqps; 2711 struct pci_dev *pdev = priv->ae_handle->pdev; 2712 struct hns3_enet_ring *ring; 2713 2714 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); 2715 if (!ring) 2716 return -ENOMEM; 2717 2718 if (ring_type == HNAE3_RING_TYPE_TX) { 2719 ring_data[q->tqp_index].ring = ring; 2720 ring_data[q->tqp_index].queue_index = q->tqp_index; 2721 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; 2722 } else { 2723 ring_data[q->tqp_index + queue_num].ring = ring; 2724 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; 2725 ring->io_base = q->io_base; 2726 } 2727 2728 hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 2729 2730 
ring->tqp = q; 2731 ring->desc = NULL; 2732 ring->desc_cb = NULL; 2733 ring->dev = priv->dev; 2734 ring->desc_dma_addr = 0; 2735 ring->buf_size = q->buf_size; 2736 ring->desc_num = q->desc_num; 2737 ring->next_to_use = 0; 2738 ring->next_to_clean = 0; 2739 2740 return 0; 2741 } 2742 2743 static int hns3_queue_to_ring(struct hnae3_queue *tqp, 2744 struct hns3_nic_priv *priv) 2745 { 2746 int ret; 2747 2748 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 2749 if (ret) 2750 return ret; 2751 2752 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 2753 if (ret) 2754 return ret; 2755 2756 return 0; 2757 } 2758 2759 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 2760 { 2761 struct hnae3_handle *h = priv->ae_handle; 2762 struct pci_dev *pdev = h->pdev; 2763 int i, ret; 2764 2765 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps * 2766 sizeof(*priv->ring_data) * 2, 2767 GFP_KERNEL); 2768 if (!priv->ring_data) 2769 return -ENOMEM; 2770 2771 for (i = 0; i < h->kinfo.num_tqps; i++) { 2772 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); 2773 if (ret) 2774 goto err; 2775 } 2776 2777 return 0; 2778 err: 2779 devm_kfree(&pdev->dev, priv->ring_data); 2780 return ret; 2781 } 2782 2783 static void hns3_put_ring_config(struct hns3_nic_priv *priv) 2784 { 2785 struct hnae3_handle *h = priv->ae_handle; 2786 int i; 2787 2788 for (i = 0; i < h->kinfo.num_tqps; i++) { 2789 devm_kfree(priv->dev, priv->ring_data[i].ring); 2790 devm_kfree(priv->dev, 2791 priv->ring_data[i + h->kinfo.num_tqps].ring); 2792 } 2793 devm_kfree(priv->dev, priv->ring_data); 2794 } 2795 2796 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) 2797 { 2798 int ret; 2799 2800 if (ring->desc_num <= 0 || ring->buf_size <= 0) 2801 return -EINVAL; 2802 2803 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), 2804 GFP_KERNEL); 2805 if (!ring->desc_cb) { 2806 ret = -ENOMEM; 2807 goto out; 2808 } 2809 2810 ret = hns3_alloc_desc(ring); 2811 if (ret) 2812 goto out_with_desc_cb; 2813 2814 if (!HNAE3_IS_TX_RING(ring)) { 2815 ret = hns3_alloc_ring_buffers(ring); 2816 if (ret) 2817 goto out_with_desc; 2818 } 2819 2820 return 0; 2821 2822 out_with_desc: 2823 hns3_free_desc(ring); 2824 out_with_desc_cb: 2825 kfree(ring->desc_cb); 2826 ring->desc_cb = NULL; 2827 out: 2828 return ret; 2829 } 2830 2831 static void hns3_fini_ring(struct hns3_enet_ring *ring) 2832 { 2833 hns3_free_desc(ring); 2834 kfree(ring->desc_cb); 2835 ring->desc_cb = NULL; 2836 ring->next_to_clean = 0; 2837 ring->next_to_use = 0; 2838 } 2839 2840 static int hns3_buf_size2type(u32 buf_size) 2841 { 2842 int bd_size_type; 2843 2844 switch (buf_size) { 2845 case 512: 2846 bd_size_type = HNS3_BD_SIZE_512_TYPE; 2847 break; 2848 case 1024: 2849 bd_size_type = HNS3_BD_SIZE_1024_TYPE; 2850 break; 2851 case 2048: 2852 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 2853 break; 2854 case 4096: 2855 bd_size_type = HNS3_BD_SIZE_4096_TYPE; 2856 break; 2857 default: 2858 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 2859 } 2860 2861 return bd_size_type; 2862 } 2863 2864 static void hns3_init_ring_hw(struct hns3_enet_ring *ring) 2865 { 2866 dma_addr_t dma = ring->desc_dma_addr; 2867 struct hnae3_queue *q = ring->tqp; 2868 2869 if (!HNAE3_IS_TX_RING(ring)) { 2870 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, 2871 (u32)dma); 2872 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, 2873 (u32)((dma >> 31) >> 1)); 2874 2875 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, 2876 hns3_buf_size2type(ring->buf_size)); 2877 hns3_write_dev(q, 
HNS3_RING_RX_RING_BD_NUM_REG, 2878 ring->desc_num / 8 - 1); 2879 2880 } else { 2881 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, 2882 (u32)dma); 2883 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 2884 (u32)((dma >> 31) >> 1)); 2885 2886 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG, 2887 hns3_buf_size2type(ring->buf_size)); 2888 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 2889 ring->desc_num / 8 - 1); 2890 } 2891 } 2892 2893 int hns3_init_all_ring(struct hns3_nic_priv *priv) 2894 { 2895 struct hnae3_handle *h = priv->ae_handle; 2896 int ring_num = h->kinfo.num_tqps * 2; 2897 int i, j; 2898 int ret; 2899 2900 for (i = 0; i < ring_num; i++) { 2901 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); 2902 if (ret) { 2903 dev_err(priv->dev, 2904 "Alloc ring memory fail! ret=%d\n", ret); 2905 goto out_when_alloc_ring_memory; 2906 } 2907 2908 hns3_init_ring_hw(priv->ring_data[i].ring); 2909 2910 u64_stats_init(&priv->ring_data[i].ring->syncp); 2911 } 2912 2913 return 0; 2914 2915 out_when_alloc_ring_memory: 2916 for (j = i - 1; j >= 0; j--) 2917 hns3_fini_ring(priv->ring_data[j].ring); 2918 2919 return -ENOMEM; 2920 } 2921 2922 int hns3_uninit_all_ring(struct hns3_nic_priv *priv) 2923 { 2924 struct hnae3_handle *h = priv->ae_handle; 2925 int i; 2926 2927 for (i = 0; i < h->kinfo.num_tqps; i++) { 2928 if (h->ae_algo->ops->reset_queue) 2929 h->ae_algo->ops->reset_queue(h, i); 2930 2931 hns3_fini_ring(priv->ring_data[i].ring); 2932 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); 2933 } 2934 return 0; 2935 } 2936 2937 /* Set mac addr if it is configured. or leave it to the AE driver */ 2938 static void hns3_init_mac_addr(struct net_device *netdev, bool init) 2939 { 2940 struct hns3_nic_priv *priv = netdev_priv(netdev); 2941 struct hnae3_handle *h = priv->ae_handle; 2942 u8 mac_addr_temp[ETH_ALEN]; 2943 2944 if (h->ae_algo->ops->get_mac_addr && init) { 2945 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 2946 ether_addr_copy(netdev->dev_addr, mac_addr_temp); 2947 } 2948 2949 /* Check if the MAC address is valid, if not get a random one */ 2950 if (!is_valid_ether_addr(netdev->dev_addr)) { 2951 eth_hw_addr_random(netdev); 2952 dev_warn(priv->dev, "using random MAC address %pM\n", 2953 netdev->dev_addr); 2954 } 2955 2956 if (h->ae_algo->ops->set_mac_addr) 2957 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 2958 2959 } 2960 2961 static void hns3_nic_set_priv_ops(struct net_device *netdev) 2962 { 2963 struct hns3_nic_priv *priv = netdev_priv(netdev); 2964 2965 if ((netdev->features & NETIF_F_TSO) || 2966 (netdev->features & NETIF_F_TSO6)) { 2967 priv->ops.fill_desc = hns3_fill_desc_tso; 2968 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; 2969 } else { 2970 priv->ops.fill_desc = hns3_fill_desc; 2971 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; 2972 } 2973 } 2974 2975 static int hns3_client_init(struct hnae3_handle *handle) 2976 { 2977 struct pci_dev *pdev = handle->pdev; 2978 struct hns3_nic_priv *priv; 2979 struct net_device *netdev; 2980 int ret; 2981 2982 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), 2983 hns3_get_max_available_channels(handle)); 2984 if (!netdev) 2985 return -ENOMEM; 2986 2987 priv = netdev_priv(netdev); 2988 priv->dev = &pdev->dev; 2989 priv->netdev = netdev; 2990 priv->ae_handle = handle; 2991 priv->ae_handle->reset_level = HNAE3_NONE_RESET; 2992 priv->ae_handle->last_reset_time = jiffies; 2993 priv->tx_timeout_count = 0; 2994 2995 handle->kinfo.netdev = netdev; 2996 handle->priv = (void *)priv; 2997 2998 
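/* Initialise the netdev basics (MAC address, features, ops, ethtool hooks), then allocate vectors and rings before registering the netdev below. */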
hns3_init_mac_addr(netdev, true); 2999 3000 hns3_set_default_feature(netdev); 3001 3002 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; 3003 netdev->priv_flags |= IFF_UNICAST_FLT; 3004 netdev->netdev_ops = &hns3_nic_netdev_ops; 3005 SET_NETDEV_DEV(netdev, &pdev->dev); 3006 hns3_ethtool_set_ops(netdev); 3007 hns3_nic_set_priv_ops(netdev); 3008 3009 /* Carrier off reporting is important to ethtool even BEFORE open */ 3010 netif_carrier_off(netdev); 3011 3012 ret = hns3_get_ring_config(priv); 3013 if (ret) { 3014 ret = -ENOMEM; 3015 goto out_get_ring_cfg; 3016 } 3017 3018 ret = hns3_nic_alloc_vector_data(priv); 3019 if (ret) { 3020 ret = -ENOMEM; 3021 goto out_alloc_vector_data; 3022 } 3023 3024 ret = hns3_nic_init_vector_data(priv); 3025 if (ret) { 3026 ret = -ENOMEM; 3027 goto out_init_vector_data; 3028 } 3029 3030 ret = hns3_init_all_ring(priv); 3031 if (ret) { 3032 ret = -ENOMEM; 3033 goto out_init_ring_data; 3034 } 3035 3036 ret = register_netdev(netdev); 3037 if (ret) { 3038 dev_err(priv->dev, "probe register netdev fail!\n"); 3039 goto out_reg_netdev_fail; 3040 } 3041 3042 hns3_dcbnl_setup(handle); 3043 3044 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ 3045 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 3046 3047 return ret; 3048 3049 out_reg_netdev_fail: 3050 out_init_ring_data: 3051 (void)hns3_nic_uninit_vector_data(priv); 3052 out_init_vector_data: 3053 hns3_nic_dealloc_vector_data(priv); 3054 out_alloc_vector_data: 3055 priv->ring_data = NULL; 3056 out_get_ring_cfg: 3057 priv->ae_handle = NULL; 3058 free_netdev(netdev); 3059 return ret; 3060 } 3061 3062 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) 3063 { 3064 struct net_device *netdev = handle->kinfo.netdev; 3065 struct hns3_nic_priv *priv = netdev_priv(netdev); 3066 int ret; 3067 3068 if (netdev->reg_state != NETREG_UNINITIALIZED) 3069 unregister_netdev(netdev); 3070 3071 ret = hns3_nic_uninit_vector_data(priv); 3072 if (ret) 3073 netdev_err(netdev, "uninit vector error\n"); 3074 3075 ret = hns3_nic_dealloc_vector_data(priv); 3076 if (ret) 3077 netdev_err(netdev, "dealloc vector error\n"); 3078 3079 ret = hns3_uninit_all_ring(priv); 3080 if (ret) 3081 netdev_err(netdev, "uninit ring error\n"); 3082 3083 hns3_put_ring_config(priv); 3084 3085 priv->ring_data = NULL; 3086 3087 free_netdev(netdev); 3088 } 3089 3090 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) 3091 { 3092 struct net_device *netdev = handle->kinfo.netdev; 3093 3094 if (!netdev) 3095 return; 3096 3097 if (linkup) { 3098 netif_carrier_on(netdev); 3099 netif_tx_wake_all_queues(netdev); 3100 netdev_info(netdev, "link up\n"); 3101 } else { 3102 netif_carrier_off(netdev); 3103 netif_tx_stop_all_queues(netdev); 3104 netdev_info(netdev, "link down\n"); 3105 } 3106 } 3107 3108 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) 3109 { 3110 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3111 struct net_device *ndev = kinfo->netdev; 3112 bool if_running; 3113 int ret; 3114 u8 i; 3115 3116 if (tc > HNAE3_MAX_TC) 3117 return -EINVAL; 3118 3119 if (!ndev) 3120 return -ENODEV; 3121 3122 if_running = netif_running(ndev); 3123 3124 ret = netdev_set_num_tc(ndev, tc); 3125 if (ret) 3126 return ret; 3127 3128 if (if_running) { 3129 (void)hns3_nic_net_stop(ndev); 3130 msleep(100); 3131 } 3132 3133 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? 
3134 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; 3135 if (ret) 3136 goto err_out; 3137 3138 if (tc <= 1) { 3139 netdev_reset_tc(ndev); 3140 goto out; 3141 } 3142 3143 for (i = 0; i < HNAE3_MAX_TC; i++) { 3144 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; 3145 3146 if (tc_info->enable) 3147 netdev_set_tc_queue(ndev, 3148 tc_info->tc, 3149 tc_info->tqp_count, 3150 tc_info->tqp_offset); 3151 } 3152 3153 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { 3154 netdev_set_prio_tc_map(ndev, i, 3155 kinfo->prio_tc[i]); 3156 } 3157 3158 out: 3159 ret = hns3_nic_set_real_num_queue(ndev); 3160 3161 err_out: 3162 if (if_running) 3163 (void)hns3_nic_net_open(ndev); 3164 3165 return ret; 3166 } 3167 3168 static void hns3_recover_hw_addr(struct net_device *ndev) 3169 { 3170 struct netdev_hw_addr_list *list; 3171 struct netdev_hw_addr *ha, *tmp; 3172 3173 /* go through and sync uc_addr entries to the device */ 3174 list = &ndev->uc; 3175 list_for_each_entry_safe(ha, tmp, &list->list, list) 3176 hns3_nic_uc_sync(ndev, ha->addr); 3177 3178 /* go through and sync mc_addr entries to the device */ 3179 list = &ndev->mc; 3180 list_for_each_entry_safe(ha, tmp, &list->list, list) 3181 hns3_nic_mc_sync(ndev, ha->addr); 3182 } 3183 3184 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) 3185 { 3186 if (!HNAE3_IS_TX_RING(ring)) 3187 return; 3188 3189 while (ring->next_to_clean != ring->next_to_use) { 3190 hns3_free_buffer_detach(ring, ring->next_to_clean); 3191 ring_ptr_move_fw(ring, next_to_clean); 3192 } 3193 } 3194 3195 static void hns3_clear_rx_ring(struct hns3_enet_ring *ring) 3196 { 3197 if (HNAE3_IS_TX_RING(ring)) 3198 return; 3199 3200 while (ring->next_to_use != ring->next_to_clean) { 3201 /* When a buffer is not reused, it's memory has been 3202 * freed in hns3_handle_rx_bd or will be freed by 3203 * stack, so only need to unmap the buffer here. 
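 * Buffers still marked with reuse_flag stay mapped and owned by the ring, so they are simply skipped here and can be posted again later.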
3204 */ 3205 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 3206 hns3_unmap_buffer(ring, 3207 &ring->desc_cb[ring->next_to_use]); 3208 ring->desc_cb[ring->next_to_use].dma = 0; 3209 } 3210 3211 ring_ptr_move_fw(ring, next_to_use); 3212 } 3213 } 3214 3215 static void hns3_clear_all_ring(struct hnae3_handle *h) 3216 { 3217 struct net_device *ndev = h->kinfo.netdev; 3218 struct hns3_nic_priv *priv = netdev_priv(ndev); 3219 u32 i; 3220 3221 for (i = 0; i < h->kinfo.num_tqps; i++) { 3222 struct netdev_queue *dev_queue; 3223 struct hns3_enet_ring *ring; 3224 3225 ring = priv->ring_data[i].ring; 3226 hns3_clear_tx_ring(ring); 3227 dev_queue = netdev_get_tx_queue(ndev, 3228 priv->ring_data[i].queue_index); 3229 netdev_tx_reset_queue(dev_queue); 3230 3231 ring = priv->ring_data[i + h->kinfo.num_tqps].ring; 3232 hns3_clear_rx_ring(ring); 3233 } 3234 } 3235 3236 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) 3237 { 3238 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3239 struct net_device *ndev = kinfo->netdev; 3240 3241 if (!netif_running(ndev)) 3242 return -EIO; 3243 3244 return hns3_nic_net_stop(ndev); 3245 } 3246 3247 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) 3248 { 3249 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3250 int ret = 0; 3251 3252 if (netif_running(kinfo->netdev)) { 3253 ret = hns3_nic_net_up(kinfo->netdev); 3254 if (ret) { 3255 netdev_err(kinfo->netdev, 3256 "hns net up fail, ret=%d!\n", ret); 3257 return ret; 3258 } 3259 handle->last_reset_time = jiffies; 3260 } 3261 3262 return ret; 3263 } 3264 3265 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) 3266 { 3267 struct net_device *netdev = handle->kinfo.netdev; 3268 struct hns3_nic_priv *priv = netdev_priv(netdev); 3269 int ret; 3270 3271 hns3_init_mac_addr(netdev, false); 3272 hns3_nic_set_rx_mode(netdev); 3273 hns3_recover_hw_addr(netdev); 3274 3275 /* Hardware table is only clear when pf resets */ 3276 if (!(handle->flags & HNAE3_SUPPORT_VF)) 3277 hns3_restore_vlan(netdev); 3278 3279 /* Carrier off reporting is important to ethtool even BEFORE open */ 3280 netif_carrier_off(netdev); 3281 3282 ret = hns3_get_ring_config(priv); 3283 if (ret) 3284 return ret; 3285 3286 ret = hns3_nic_init_vector_data(priv); 3287 if (ret) 3288 return ret; 3289 3290 ret = hns3_init_all_ring(priv); 3291 if (ret) { 3292 hns3_nic_uninit_vector_data(priv); 3293 priv->ring_data = NULL; 3294 } 3295 3296 return ret; 3297 } 3298 3299 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) 3300 { 3301 struct net_device *netdev = handle->kinfo.netdev; 3302 struct hns3_nic_priv *priv = netdev_priv(netdev); 3303 int ret; 3304 3305 hns3_clear_all_ring(handle); 3306 3307 ret = hns3_nic_uninit_vector_data(priv); 3308 if (ret) { 3309 netdev_err(netdev, "uninit vector error\n"); 3310 return ret; 3311 } 3312 3313 ret = hns3_uninit_all_ring(priv); 3314 if (ret) 3315 netdev_err(netdev, "uninit ring error\n"); 3316 3317 hns3_put_ring_config(priv); 3318 3319 priv->ring_data = NULL; 3320 3321 return ret; 3322 } 3323 3324 static int hns3_reset_notify(struct hnae3_handle *handle, 3325 enum hnae3_reset_notify_type type) 3326 { 3327 int ret = 0; 3328 3329 switch (type) { 3330 case HNAE3_UP_CLIENT: 3331 ret = hns3_reset_notify_up_enet(handle); 3332 break; 3333 case HNAE3_DOWN_CLIENT: 3334 ret = hns3_reset_notify_down_enet(handle); 3335 break; 3336 case HNAE3_INIT_CLIENT: 3337 ret = hns3_reset_notify_init_enet(handle); 3338 break; 3339 case HNAE3_UNINIT_CLIENT: 3340 ret = 
hns3_reset_notify_uninit_enet(handle); 3341 break; 3342 default: 3343 break; 3344 } 3345 3346 return ret; 3347 } 3348 3349 static void hns3_restore_coal(struct hns3_nic_priv *priv, 3350 struct hns3_enet_coalesce *tx, 3351 struct hns3_enet_coalesce *rx) 3352 { 3353 u16 vector_num = priv->vector_num; 3354 int i; 3355 3356 for (i = 0; i < vector_num; i++) { 3357 memcpy(&priv->tqp_vector[i].tx_group.coal, tx, 3358 sizeof(struct hns3_enet_coalesce)); 3359 memcpy(&priv->tqp_vector[i].rx_group.coal, rx, 3360 sizeof(struct hns3_enet_coalesce)); 3361 } 3362 } 3363 3364 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num, 3365 struct hns3_enet_coalesce *tx, 3366 struct hns3_enet_coalesce *rx) 3367 { 3368 struct hns3_nic_priv *priv = netdev_priv(netdev); 3369 struct hnae3_handle *h = hns3_get_handle(netdev); 3370 int ret; 3371 3372 ret = h->ae_algo->ops->set_channels(h, new_tqp_num); 3373 if (ret) 3374 return ret; 3375 3376 ret = hns3_get_ring_config(priv); 3377 if (ret) 3378 return ret; 3379 3380 ret = hns3_nic_alloc_vector_data(priv); 3381 if (ret) 3382 goto err_alloc_vector; 3383 3384 hns3_restore_coal(priv, tx, rx); 3385 3386 ret = hns3_nic_init_vector_data(priv); 3387 if (ret) 3388 goto err_uninit_vector; 3389 3390 ret = hns3_init_all_ring(priv); 3391 if (ret) 3392 goto err_put_ring; 3393 3394 return 0; 3395 3396 err_put_ring: 3397 hns3_put_ring_config(priv); 3398 err_uninit_vector: 3399 hns3_nic_uninit_vector_data(priv); 3400 err_alloc_vector: 3401 hns3_nic_dealloc_vector_data(priv); 3402 return ret; 3403 } 3404 3405 static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num) 3406 { 3407 return (new_tqp_num / num_tc) * num_tc; 3408 } 3409 3410 int hns3_set_channels(struct net_device *netdev, 3411 struct ethtool_channels *ch) 3412 { 3413 struct hns3_nic_priv *priv = netdev_priv(netdev); 3414 struct hnae3_handle *h = hns3_get_handle(netdev); 3415 struct hnae3_knic_private_info *kinfo = &h->kinfo; 3416 struct hns3_enet_coalesce tx_coal, rx_coal; 3417 bool if_running = netif_running(netdev); 3418 u32 new_tqp_num = ch->combined_count; 3419 u16 org_tqp_num; 3420 int ret; 3421 3422 if (ch->rx_count || ch->tx_count) 3423 return -EINVAL; 3424 3425 if (new_tqp_num > hns3_get_max_available_channels(h) || 3426 new_tqp_num < kinfo->num_tc) { 3427 dev_err(&netdev->dev, 3428 "Change tqps fail, the tqp range is from %d to %d", 3429 kinfo->num_tc, 3430 hns3_get_max_available_channels(h)); 3431 return -EINVAL; 3432 } 3433 3434 new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num); 3435 if (kinfo->num_tqps == new_tqp_num) 3436 return 0; 3437 3438 if (if_running) 3439 hns3_nic_net_stop(netdev); 3440 3441 hns3_clear_all_ring(h); 3442 3443 ret = hns3_nic_uninit_vector_data(priv); 3444 if (ret) { 3445 dev_err(&netdev->dev, 3446 "Unbind vector with tqp fail, nothing is changed"); 3447 goto open_netdev; 3448 } 3449 3450 /* Changing the tqp num may also change the vector num, 3451 * ethtool only supports setting and querying one coal 3452 * configuration for now, so save vector 0's coal 3453 * configuration here in order to restore it.
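 * hns3_restore_coal() then copies this saved configuration to every vector allocated for the new queue count.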
3454 */ 3455 memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal, 3456 sizeof(struct hns3_enet_coalesce)); 3457 memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal, 3458 sizeof(struct hns3_enet_coalesce)); 3459 3460 hns3_nic_dealloc_vector_data(priv); 3461 3462 hns3_uninit_all_ring(priv); 3463 hns3_put_ring_config(priv); 3464 3465 org_tqp_num = h->kinfo.num_tqps; 3466 ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal); 3467 if (ret) { 3468 ret = hns3_modify_tqp_num(netdev, org_tqp_num, 3469 &tx_coal, &rx_coal); 3470 if (ret) { 3471 /* If revert to old tqp failed, fatal error occurred */ 3472 dev_err(&netdev->dev, 3473 "Revert to old tqp num fail, ret=%d", ret); 3474 return ret; 3475 } 3476 dev_info(&netdev->dev, 3477 "Change tqp num fail, Revert to old tqp num"); 3478 } 3479 3480 open_netdev: 3481 if (if_running) 3482 hns3_nic_net_open(netdev); 3483 3484 return ret; 3485 } 3486 3487 static const struct hnae3_client_ops client_ops = { 3488 .init_instance = hns3_client_init, 3489 .uninit_instance = hns3_client_uninit, 3490 .link_status_change = hns3_link_status_change, 3491 .setup_tc = hns3_client_setup_tc, 3492 .reset_notify = hns3_reset_notify, 3493 }; 3494 3495 /* hns3_init_module - Driver registration routine 3496 * hns3_init_module is the first routine called when the driver is 3497 * loaded. All it does is register with the PCI subsystem. 3498 */ 3499 static int __init hns3_init_module(void) 3500 { 3501 int ret; 3502 3503 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); 3504 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); 3505 3506 client.type = HNAE3_CLIENT_KNIC; 3507 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", 3508 hns3_driver_name); 3509 3510 client.ops = &client_ops; 3511 3512 INIT_LIST_HEAD(&client.node); 3513 3514 ret = hnae3_register_client(&client); 3515 if (ret) 3516 return ret; 3517 3518 ret = pci_register_driver(&hns3_driver); 3519 if (ret) 3520 hnae3_unregister_client(&client); 3521 3522 return ret; 3523 } 3524 module_init(hns3_init_module); 3525 3526 /* hns3_exit_module - Driver exit cleanup routine 3527 * hns3_exit_module is called just before the driver is removed 3528 * from memory. 3529 */ 3530 static void __exit hns3_exit_module(void) 3531 { 3532 pci_unregister_driver(&hns3_driver); 3533 hnae3_unregister_client(&client); 3534 } 3535 module_exit(hns3_exit_module); 3536 3537 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); 3538 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3539 MODULE_LICENSE("GPL"); 3540 MODULE_ALIAS("pci:hns-nic"); 3541 MODULE_VERSION(HNS3_MOD_VERSION); 3542
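/* Summary of the registration flow above: hns3_init_module() first registers
 * the KNIC client with the hnae3 framework and then registers the PCI driver;
 * when a supported device is bound, the framework invokes
 * client_ops.init_instance (hns3_client_init) to create and register the
 * netdev, and client_ops.uninit_instance (hns3_client_uninit) tears it down.
 *
 * Illustrative usage only (assuming the driver is built as a module):
 *
 *   modprobe hns3
 *   ethtool -i <interface>    # the "driver" field reports hns3
 */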