1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/dma-mapping.h> 5 #include <linux/etherdevice.h> 6 #include <linux/interrupt.h> 7 #ifdef CONFIG_RFS_ACCEL 8 #include <linux/cpu_rmap.h> 9 #endif 10 #include <linux/if_vlan.h> 11 #include <linux/irq.h> 12 #include <linux/ip.h> 13 #include <linux/ipv6.h> 14 #include <linux/module.h> 15 #include <linux/pci.h> 16 #include <linux/aer.h> 17 #include <linux/skbuff.h> 18 #include <linux/sctp.h> 19 #include <net/gre.h> 20 #include <net/ip6_checksum.h> 21 #include <net/pkt_cls.h> 22 #include <net/tcp.h> 23 #include <net/vxlan.h> 24 25 #include "hnae3.h" 26 #include "hns3_enet.h" 27 /* All hns3 tracepoints are defined by the include below, which 28 * must be included exactly once across the whole kernel with 29 * CREATE_TRACE_POINTS defined 30 */ 31 #define CREATE_TRACE_POINTS 32 #include "hns3_trace.h" 33 34 #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) 35 #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) 36 37 #define hns3_rl_err(fmt, ...) \ 38 do { \ 39 if (net_ratelimit()) \ 40 netdev_err(fmt, ##__VA_ARGS__); \ 41 } while (0) 42 43 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); 44 45 static const char hns3_driver_name[] = "hns3"; 46 static const char hns3_driver_string[] = 47 "Hisilicon Ethernet Network Driver for Hip08 Family"; 48 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; 49 static struct hnae3_client client; 50 51 static int debug = -1; 52 module_param(debug, int, 0); 53 MODULE_PARM_DESC(debug, " Network interface message level setting"); 54 55 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 56 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) 57 58 #define HNS3_INNER_VLAN_TAG 1 59 #define HNS3_OUTER_VLAN_TAG 2 60 61 #define HNS3_MIN_TX_LEN 33U 62 63 /* hns3_pci_tbl - PCI Device ID Table 64 * 65 * Last entry must be all 0s 66 * 67 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 68 * Class, Class Mask, private data (not used) } 69 */ 70 static const struct pci_device_id hns3_pci_tbl[] = { 71 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 72 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 73 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 74 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 75 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 76 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 77 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 78 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 79 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 80 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 82 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 85 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 86 /* required last entry */ 87 {0, } 88 }; 89 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); 90 91 static irqreturn_t hns3_irq_handle(int irq, void *vector) 92 { 93 struct hns3_enet_tqp_vector *tqp_vector = vector; 94 95 napi_schedule_irqoff(&tqp_vector->napi); 96 97 return IRQ_HANDLED; 98 } 99 100 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) 101 { 102 struct hns3_enet_tqp_vector *tqp_vectors; 103 unsigned int i; 104 105 for (i = 0; i < priv->vector_num; i++) { 106 tqp_vectors = &priv->tqp_vector[i]; 107 108 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) 109 continue; 110 111 /* clear the affinity mask */ 112 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); 113 114 /* release the irq resource */ 115 
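		/* Note: the dev_id cookie passed to free_irq() below must be the
		 * same tqp_vector pointer that was handed to request_irq(),
		 * otherwise the kernel cannot match the irqaction and the IRQ
		 * would stay registered.
		 */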
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "TxRx", txrx_int_idx++);
			txrx_int_idx++;
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Rx", rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Tx", tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			hns3_nic_uninit_irq(priv);
			return ret;
		}

		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);
	enable_irq(tqp_vector->vector_irq);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* This defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second. GL and RL (Rate Limiter) are two ways to achieve
	 * interrupt coalescing.
	 */

	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
215 */ 216 rl_reg |= HNS3_INT_RL_ENABLE_MASK; 217 218 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); 219 } 220 221 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, 222 u32 gl_value) 223 { 224 u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value); 225 226 writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); 227 } 228 229 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, 230 u32 gl_value) 231 { 232 u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value); 233 234 writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); 235 } 236 237 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, 238 struct hns3_nic_priv *priv) 239 { 240 /* initialize the configuration for interrupt coalescing. 241 * 1. GL (Interrupt Gap Limiter) 242 * 2. RL (Interrupt Rate Limiter) 243 * 244 * Default: enable interrupt coalescing self-adaptive and GL 245 */ 246 tqp_vector->tx_group.coal.gl_adapt_enable = 1; 247 tqp_vector->rx_group.coal.gl_adapt_enable = 1; 248 249 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; 250 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; 251 252 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; 253 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; 254 } 255 256 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector, 257 struct hns3_nic_priv *priv) 258 { 259 struct hnae3_handle *h = priv->ae_handle; 260 261 hns3_set_vector_coalesce_tx_gl(tqp_vector, 262 tqp_vector->tx_group.coal.int_gl); 263 hns3_set_vector_coalesce_rx_gl(tqp_vector, 264 tqp_vector->rx_group.coal.int_gl); 265 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); 266 } 267 268 static int hns3_nic_set_real_num_queue(struct net_device *netdev) 269 { 270 struct hnae3_handle *h = hns3_get_handle(netdev); 271 struct hnae3_knic_private_info *kinfo = &h->kinfo; 272 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; 273 int i, ret; 274 275 if (kinfo->num_tc <= 1) { 276 netdev_reset_tc(netdev); 277 } else { 278 ret = netdev_set_num_tc(netdev, kinfo->num_tc); 279 if (ret) { 280 netdev_err(netdev, 281 "netdev_set_num_tc fail, ret=%d!\n", ret); 282 return ret; 283 } 284 285 for (i = 0; i < HNAE3_MAX_TC; i++) { 286 if (!kinfo->tc_info[i].enable) 287 continue; 288 289 netdev_set_tc_queue(netdev, 290 kinfo->tc_info[i].tc, 291 kinfo->tc_info[i].tqp_count, 292 kinfo->tc_info[i].tqp_offset); 293 } 294 } 295 296 ret = netif_set_real_num_tx_queues(netdev, queue_size); 297 if (ret) { 298 netdev_err(netdev, 299 "netif_set_real_num_tx_queues fail, ret=%d!\n", ret); 300 return ret; 301 } 302 303 ret = netif_set_real_num_rx_queues(netdev, queue_size); 304 if (ret) { 305 netdev_err(netdev, 306 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); 307 return ret; 308 } 309 310 return 0; 311 } 312 313 static u16 hns3_get_max_available_channels(struct hnae3_handle *h) 314 { 315 u16 alloc_tqps, max_rss_size, rss_size; 316 317 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); 318 rss_size = alloc_tqps / h->kinfo.num_tc; 319 320 return min_t(u16, rss_size, max_rss_size); 321 } 322 323 static void hns3_tqp_enable(struct hnae3_queue *tqp) 324 { 325 u32 rcb_reg; 326 327 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 328 rcb_reg |= BIT(HNS3_RING_EN_B); 329 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 330 } 331 332 static void hns3_tqp_disable(struct hnae3_queue *tqp) 333 { 334 u32 rcb_reg; 335 336 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 337 rcb_reg &= ~BIT(HNS3_RING_EN_B); 338 
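	/* Write the value back with the enable bit cleared; the flag is a
	 * single bit in HNS3_RING_EN_REG, so this read-modify-write leaves
	 * the other fields of the register untouched (hns3_tqp_enable()
	 * mirrors it with rcb_reg |= BIT(HNS3_RING_EN_B)).
	 */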
hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 339 } 340 341 static void hns3_free_rx_cpu_rmap(struct net_device *netdev) 342 { 343 #ifdef CONFIG_RFS_ACCEL 344 free_irq_cpu_rmap(netdev->rx_cpu_rmap); 345 netdev->rx_cpu_rmap = NULL; 346 #endif 347 } 348 349 static int hns3_set_rx_cpu_rmap(struct net_device *netdev) 350 { 351 #ifdef CONFIG_RFS_ACCEL 352 struct hns3_nic_priv *priv = netdev_priv(netdev); 353 struct hns3_enet_tqp_vector *tqp_vector; 354 int i, ret; 355 356 if (!netdev->rx_cpu_rmap) { 357 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); 358 if (!netdev->rx_cpu_rmap) 359 return -ENOMEM; 360 } 361 362 for (i = 0; i < priv->vector_num; i++) { 363 tqp_vector = &priv->tqp_vector[i]; 364 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, 365 tqp_vector->vector_irq); 366 if (ret) { 367 hns3_free_rx_cpu_rmap(netdev); 368 return ret; 369 } 370 } 371 #endif 372 return 0; 373 } 374 375 static int hns3_nic_net_up(struct net_device *netdev) 376 { 377 struct hns3_nic_priv *priv = netdev_priv(netdev); 378 struct hnae3_handle *h = priv->ae_handle; 379 int i, j; 380 int ret; 381 382 ret = hns3_nic_reset_all_ring(h); 383 if (ret) 384 return ret; 385 386 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 387 388 /* enable the vectors */ 389 for (i = 0; i < priv->vector_num; i++) 390 hns3_vector_enable(&priv->tqp_vector[i]); 391 392 /* enable rcb */ 393 for (j = 0; j < h->kinfo.num_tqps; j++) 394 hns3_tqp_enable(h->kinfo.tqp[j]); 395 396 /* start the ae_dev */ 397 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; 398 if (ret) { 399 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 400 while (j--) 401 hns3_tqp_disable(h->kinfo.tqp[j]); 402 403 for (j = i - 1; j >= 0; j--) 404 hns3_vector_disable(&priv->tqp_vector[j]); 405 } 406 407 return ret; 408 } 409 410 static void hns3_config_xps(struct hns3_nic_priv *priv) 411 { 412 int i; 413 414 for (i = 0; i < priv->vector_num; i++) { 415 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; 416 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; 417 418 while (ring) { 419 int ret; 420 421 ret = netif_set_xps_queue(priv->netdev, 422 &tqp_vector->affinity_mask, 423 ring->tqp->tqp_index); 424 if (ret) 425 netdev_warn(priv->netdev, 426 "set xps queue failed: %d", ret); 427 428 ring = ring->next; 429 } 430 } 431 } 432 433 static int hns3_nic_net_open(struct net_device *netdev) 434 { 435 struct hns3_nic_priv *priv = netdev_priv(netdev); 436 struct hnae3_handle *h = hns3_get_handle(netdev); 437 struct hnae3_knic_private_info *kinfo; 438 int i, ret; 439 440 if (hns3_nic_resetting(netdev)) 441 return -EBUSY; 442 443 netif_carrier_off(netdev); 444 445 ret = hns3_nic_set_real_num_queue(netdev); 446 if (ret) 447 return ret; 448 449 ret = hns3_nic_net_up(netdev); 450 if (ret) { 451 netdev_err(netdev, "net up fail, ret=%d!\n", ret); 452 return ret; 453 } 454 455 kinfo = &h->kinfo; 456 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 457 netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]); 458 459 if (h->ae_algo->ops->set_timer_task) 460 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); 461 462 hns3_config_xps(priv); 463 464 netif_dbg(h, drv, netdev, "net open\n"); 465 466 return 0; 467 } 468 469 static void hns3_reset_tx_queue(struct hnae3_handle *h) 470 { 471 struct net_device *ndev = h->kinfo.netdev; 472 struct hns3_nic_priv *priv = netdev_priv(ndev); 473 struct netdev_queue *dev_queue; 474 u32 i; 475 476 for (i = 0; i < h->kinfo.num_tqps; i++) { 477 dev_queue = netdev_get_tx_queue(ndev, 478 priv->ring[i].queue_index); 479 
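		/* netdev_tx_reset_queue() clears the queue's BQL (byte queue
		 * limit) accounting and stack-xoff state, so the counters
		 * updated by netdev_tx_sent_queue() in hns3_nic_net_xmit()
		 * restart from a clean state once the rings have been cleared.
		 */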
		netdev_tx_reset_queue(dev_queue);
	}
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* disable rcb */
	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_tqp_disable(h->kinfo.tqp[i]);

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* Delay ring buffer clearing to hns3_reset_notify_uninit_enet
	 * during the reset process, because the driver may not be able
	 * to disable the rings through firmware when the netdev is being
	 * brought down.
	 */
	if (!hns3_nic_resetting(netdev))
		hns3_clear_all_ring(priv->ae_handle, false);

	hns3_reset_tx_queue(priv->ae_handle);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return 0;

	netif_dbg(h, drv, netdev, "net stop\n");

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, false);

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	/* The request to remove the device's own address needs to be
	 * ignored, because the device address is stored together with the
	 * other addresses of the UC list in the function's MAC filter list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
	u8 flags = 0;

	if (netdev->flags & IFF_PROMISC) {
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
	} else {
		flags |= HNAE3_VLAN_FLTR;
		if (netdev->flags & IFF_ALLMULTI)
			flags |= HNAE3_USER_MPE;
	}

	return flags;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 new_flags;

	new_flags = hns3_get_netdev_flags(netdev);

	__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
	__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);

	/* When the user enables promiscuous mode, VLAN filtering is
	 * disabled so that all packets are let in.
	 */
	h->netdev_flags = new_flags;
	hns3_request_update_promisc_mode(h);
}

void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ops->request_update_promisc_mode)
		ops->request_update_promisc_mode(handle);
}

int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->set_promisc_mode) {
		return h->ae_algo->ops->set_promisc_mode(h,
						promisc_flags & HNAE3_UPE,
						promisc_flags & HNAE3_MPE);
	}

	return 0;
}

void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	bool last_state;

	if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
		last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
		if (enable != last_state) {
			netdev_info(netdev,
				    "%s vlan filter\n",
				    enable ? "enable" : "disable");
			h->ae_algo->ops->enable_vlan_filter(h, enable);
		}
	}
}

static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (unlikely(ret < 0))
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4 checksum field when TSO is
	 * needed.
677 */ 678 if (l3.v4->version == 4) 679 l3.v4->check = 0; 680 681 /* tunnel packet */ 682 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 683 SKB_GSO_GRE_CSUM | 684 SKB_GSO_UDP_TUNNEL | 685 SKB_GSO_UDP_TUNNEL_CSUM)) { 686 if ((!(skb_shinfo(skb)->gso_type & 687 SKB_GSO_PARTIAL)) && 688 (skb_shinfo(skb)->gso_type & 689 SKB_GSO_UDP_TUNNEL_CSUM)) { 690 /* Software should clear the udp's checksum 691 * field when tso is needed. 692 */ 693 l4.udp->check = 0; 694 } 695 /* reset l3&l4 pointers from outer to inner headers */ 696 l3.hdr = skb_inner_network_header(skb); 697 l4.hdr = skb_inner_transport_header(skb); 698 699 /* Software should clear the IPv4's checksum field when 700 * tso is needed. 701 */ 702 if (l3.v4->version == 4) 703 l3.v4->check = 0; 704 } 705 706 /* normal or tunnel packet */ 707 l4_offset = l4.hdr - skb->data; 708 hdr_len = (l4.tcp->doff << 2) + l4_offset; 709 710 /* remove payload length from inner pseudo checksum when tso */ 711 l4_paylen = skb->len - l4_offset; 712 csum_replace_by_diff(&l4.tcp->check, 713 (__force __wsum)htonl(l4_paylen)); 714 715 /* find the txbd field values */ 716 *paylen = skb->len - hdr_len; 717 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); 718 719 /* get MSS for TSO */ 720 *mss = skb_shinfo(skb)->gso_size; 721 722 trace_hns3_tso(skb); 723 724 return 0; 725 } 726 727 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, 728 u8 *il4_proto) 729 { 730 union l3_hdr_info l3; 731 unsigned char *l4_hdr; 732 unsigned char *exthdr; 733 u8 l4_proto_tmp; 734 __be16 frag_off; 735 736 /* find outer header point */ 737 l3.hdr = skb_network_header(skb); 738 l4_hdr = skb_transport_header(skb); 739 740 if (skb->protocol == htons(ETH_P_IPV6)) { 741 exthdr = l3.hdr + sizeof(*l3.v6); 742 l4_proto_tmp = l3.v6->nexthdr; 743 if (l4_hdr != exthdr) 744 ipv6_skip_exthdr(skb, exthdr - skb->data, 745 &l4_proto_tmp, &frag_off); 746 } else if (skb->protocol == htons(ETH_P_IP)) { 747 l4_proto_tmp = l3.v4->protocol; 748 } else { 749 return -EINVAL; 750 } 751 752 *ol4_proto = l4_proto_tmp; 753 754 /* tunnel packet */ 755 if (!skb->encapsulation) { 756 *il4_proto = 0; 757 return 0; 758 } 759 760 /* find inner header point */ 761 l3.hdr = skb_inner_network_header(skb); 762 l4_hdr = skb_inner_transport_header(skb); 763 764 if (l3.v6->version == 6) { 765 exthdr = l3.hdr + sizeof(*l3.v6); 766 l4_proto_tmp = l3.v6->nexthdr; 767 if (l4_hdr != exthdr) 768 ipv6_skip_exthdr(skb, exthdr - skb->data, 769 &l4_proto_tmp, &frag_off); 770 } else if (l3.v4->version == 4) { 771 l4_proto_tmp = l3.v4->protocol; 772 } 773 774 *il4_proto = l4_proto_tmp; 775 776 return 0; 777 } 778 779 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL 780 * and it is udp packet, which has a dest port as the IANA assigned. 781 * the hardware is expected to do the checksum offload, but the 782 * hardware will not do the checksum offload when udp dest port is 783 * 4789. 
784 */ 785 static bool hns3_tunnel_csum_bug(struct sk_buff *skb) 786 { 787 union l4_hdr_info l4; 788 789 l4.hdr = skb_transport_header(skb); 790 791 if (!(!skb->encapsulation && 792 l4.udp->dest == htons(IANA_VXLAN_UDP_PORT))) 793 return false; 794 795 skb_checksum_help(skb); 796 797 return true; 798 } 799 800 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 801 u32 *ol_type_vlan_len_msec) 802 { 803 u32 l2_len, l3_len, l4_len; 804 unsigned char *il2_hdr; 805 union l3_hdr_info l3; 806 union l4_hdr_info l4; 807 808 l3.hdr = skb_network_header(skb); 809 l4.hdr = skb_transport_header(skb); 810 811 /* compute OL2 header size, defined in 2 Bytes */ 812 l2_len = l3.hdr - skb->data; 813 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); 814 815 /* compute OL3 header size, defined in 4 Bytes */ 816 l3_len = l4.hdr - l3.hdr; 817 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); 818 819 il2_hdr = skb_inner_mac_header(skb); 820 /* compute OL4 header size, defined in 4 Bytes */ 821 l4_len = il2_hdr - l4.hdr; 822 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); 823 824 /* define outer network header type */ 825 if (skb->protocol == htons(ETH_P_IP)) { 826 if (skb_is_gso(skb)) 827 hns3_set_field(*ol_type_vlan_len_msec, 828 HNS3_TXD_OL3T_S, 829 HNS3_OL3T_IPV4_CSUM); 830 else 831 hns3_set_field(*ol_type_vlan_len_msec, 832 HNS3_TXD_OL3T_S, 833 HNS3_OL3T_IPV4_NO_CSUM); 834 835 } else if (skb->protocol == htons(ETH_P_IPV6)) { 836 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, 837 HNS3_OL3T_IPV6); 838 } 839 840 if (ol4_proto == IPPROTO_UDP) 841 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 842 HNS3_TUN_MAC_IN_UDP); 843 else if (ol4_proto == IPPROTO_GRE) 844 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 845 HNS3_TUN_NVGRE); 846 } 847 848 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 849 u8 il4_proto, u32 *type_cs_vlan_tso, 850 u32 *ol_type_vlan_len_msec) 851 { 852 unsigned char *l2_hdr = skb->data; 853 u32 l4_proto = ol4_proto; 854 union l4_hdr_info l4; 855 union l3_hdr_info l3; 856 u32 l2_len, l3_len; 857 858 l4.hdr = skb_transport_header(skb); 859 l3.hdr = skb_network_header(skb); 860 861 /* handle encapsulation skb */ 862 if (skb->encapsulation) { 863 /* If this is a not UDP/GRE encapsulation skb */ 864 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { 865 /* drop the skb tunnel packet if hardware don't support, 866 * because hardware can't calculate csum when TSO. 867 */ 868 if (skb_is_gso(skb)) 869 return -EDOM; 870 871 /* the stack computes the IP header already, 872 * driver calculate l4 checksum when not TSO. 873 */ 874 skb_checksum_help(skb); 875 return 0; 876 } 877 878 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); 879 880 /* switch to inner header */ 881 l2_hdr = skb_inner_mac_header(skb); 882 l3.hdr = skb_inner_network_header(skb); 883 l4.hdr = skb_inner_transport_header(skb); 884 l4_proto = il4_proto; 885 } 886 887 if (l3.v4->version == 4) { 888 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 889 HNS3_L3T_IPV4); 890 891 /* the stack computes the IP header already, the only time we 892 * need the hardware to recompute it is in the case of TSO. 
893 */ 894 if (skb_is_gso(skb)) 895 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); 896 } else if (l3.v6->version == 6) { 897 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 898 HNS3_L3T_IPV6); 899 } 900 901 /* compute inner(/normal) L2 header size, defined in 2 Bytes */ 902 l2_len = l3.hdr - l2_hdr; 903 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); 904 905 /* compute inner(/normal) L3 header size, defined in 4 Bytes */ 906 l3_len = l4.hdr - l3.hdr; 907 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); 908 909 /* compute inner(/normal) L4 header size, defined in 4 Bytes */ 910 switch (l4_proto) { 911 case IPPROTO_TCP: 912 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 913 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 914 HNS3_L4T_TCP); 915 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 916 l4.tcp->doff); 917 break; 918 case IPPROTO_UDP: 919 if (hns3_tunnel_csum_bug(skb)) 920 break; 921 922 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 923 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 924 HNS3_L4T_UDP); 925 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 926 (sizeof(struct udphdr) >> 2)); 927 break; 928 case IPPROTO_SCTP: 929 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 930 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 931 HNS3_L4T_SCTP); 932 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 933 (sizeof(struct sctphdr) >> 2)); 934 break; 935 default: 936 /* drop the skb tunnel packet if hardware don't support, 937 * because hardware can't calculate csum when TSO. 938 */ 939 if (skb_is_gso(skb)) 940 return -EDOM; 941 942 /* the stack computes the IP header already, 943 * driver calculate l4 checksum when not TSO. 944 */ 945 skb_checksum_help(skb); 946 return 0; 947 } 948 949 return 0; 950 } 951 952 static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, 953 struct sk_buff *skb) 954 { 955 struct hnae3_handle *handle = tx_ring->tqp->handle; 956 struct vlan_ethhdr *vhdr; 957 int rc; 958 959 if (!(skb->protocol == htons(ETH_P_8021Q) || 960 skb_vlan_tag_present(skb))) 961 return 0; 962 963 /* Since HW limitation, if port based insert VLAN enabled, only one VLAN 964 * header is allowed in skb, otherwise it will cause RAS error. 965 */ 966 if (unlikely(skb_vlan_tagged_multi(skb) && 967 handle->port_base_vlan_state == 968 HNAE3_PORT_BASE_VLAN_ENABLE)) 969 return -EINVAL; 970 971 if (skb->protocol == htons(ETH_P_8021Q) && 972 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 973 /* When HW VLAN acceleration is turned off, and the stack 974 * sets the protocol to 802.1q, the driver just need to 975 * set the protocol to the encapsulated ethertype. 976 */ 977 skb->protocol = vlan_get_protocol(skb); 978 return 0; 979 } 980 981 if (skb_vlan_tag_present(skb)) { 982 /* Based on hw strategy, use out_vtag in two layer tag case, 983 * and use inner_vtag in one tag case. 
984 */ 985 if (skb->protocol == htons(ETH_P_8021Q) && 986 handle->port_base_vlan_state == 987 HNAE3_PORT_BASE_VLAN_DISABLE) 988 rc = HNS3_OUTER_VLAN_TAG; 989 else 990 rc = HNS3_INNER_VLAN_TAG; 991 992 skb->protocol = vlan_get_protocol(skb); 993 return rc; 994 } 995 996 rc = skb_cow_head(skb, 0); 997 if (unlikely(rc < 0)) 998 return rc; 999 1000 vhdr = (struct vlan_ethhdr *)skb->data; 1001 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) 1002 & VLAN_PRIO_MASK); 1003 1004 skb->protocol = vlan_get_protocol(skb); 1005 return 0; 1006 } 1007 1008 static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, 1009 struct sk_buff *skb, struct hns3_desc *desc) 1010 { 1011 u32 ol_type_vlan_len_msec = 0; 1012 u32 type_cs_vlan_tso = 0; 1013 u32 paylen = skb->len; 1014 u16 inner_vtag = 0; 1015 u16 out_vtag = 0; 1016 u16 mss = 0; 1017 int ret; 1018 1019 ret = hns3_handle_vtags(ring, skb); 1020 if (unlikely(ret < 0)) { 1021 u64_stats_update_begin(&ring->syncp); 1022 ring->stats.tx_vlan_err++; 1023 u64_stats_update_end(&ring->syncp); 1024 return ret; 1025 } else if (ret == HNS3_INNER_VLAN_TAG) { 1026 inner_vtag = skb_vlan_tag_get(skb); 1027 inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1028 VLAN_PRIO_MASK; 1029 hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); 1030 } else if (ret == HNS3_OUTER_VLAN_TAG) { 1031 out_vtag = skb_vlan_tag_get(skb); 1032 out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1033 VLAN_PRIO_MASK; 1034 hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, 1035 1); 1036 } 1037 1038 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1039 u8 ol4_proto, il4_proto; 1040 1041 skb_reset_mac_len(skb); 1042 1043 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); 1044 if (unlikely(ret < 0)) { 1045 u64_stats_update_begin(&ring->syncp); 1046 ring->stats.tx_l4_proto_err++; 1047 u64_stats_update_end(&ring->syncp); 1048 return ret; 1049 } 1050 1051 ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, 1052 &type_cs_vlan_tso, 1053 &ol_type_vlan_len_msec); 1054 if (unlikely(ret < 0)) { 1055 u64_stats_update_begin(&ring->syncp); 1056 ring->stats.tx_l2l3l4_err++; 1057 u64_stats_update_end(&ring->syncp); 1058 return ret; 1059 } 1060 1061 ret = hns3_set_tso(skb, &paylen, &mss, 1062 &type_cs_vlan_tso); 1063 if (unlikely(ret < 0)) { 1064 u64_stats_update_begin(&ring->syncp); 1065 ring->stats.tx_tso_err++; 1066 u64_stats_update_end(&ring->syncp); 1067 return ret; 1068 } 1069 } 1070 1071 /* Set txbd */ 1072 desc->tx.ol_type_vlan_len_msec = 1073 cpu_to_le32(ol_type_vlan_len_msec); 1074 desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); 1075 desc->tx.paylen = cpu_to_le32(paylen); 1076 desc->tx.mss = cpu_to_le16(mss); 1077 desc->tx.vlan_tag = cpu_to_le16(inner_vtag); 1078 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); 1079 1080 return 0; 1081 } 1082 1083 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, 1084 unsigned int size, enum hns_desc_type type) 1085 { 1086 #define HNS3_LIKELY_BD_NUM 1 1087 1088 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 1089 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1090 struct device *dev = ring_to_dev(ring); 1091 skb_frag_t *frag; 1092 unsigned int frag_buf_num; 1093 int k, sizeoflast; 1094 dma_addr_t dma; 1095 1096 if (type == DESC_TYPE_SKB) { 1097 struct sk_buff *skb = (struct sk_buff *)priv; 1098 int ret; 1099 1100 ret = hns3_fill_skb_desc(ring, skb, desc); 1101 if (unlikely(ret < 0)) 1102 return ret; 1103 1104 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); 1105 } else if (type == 
DESC_TYPE_FRAGLIST_SKB) { 1106 struct sk_buff *skb = (struct sk_buff *)priv; 1107 1108 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); 1109 } else { 1110 frag = (skb_frag_t *)priv; 1111 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); 1112 } 1113 1114 if (unlikely(dma_mapping_error(dev, dma))) { 1115 u64_stats_update_begin(&ring->syncp); 1116 ring->stats.sw_err_cnt++; 1117 u64_stats_update_end(&ring->syncp); 1118 return -ENOMEM; 1119 } 1120 1121 desc_cb->priv = priv; 1122 desc_cb->length = size; 1123 desc_cb->dma = dma; 1124 desc_cb->type = type; 1125 1126 if (likely(size <= HNS3_MAX_BD_SIZE)) { 1127 desc->addr = cpu_to_le64(dma); 1128 desc->tx.send_size = cpu_to_le16(size); 1129 desc->tx.bdtp_fe_sc_vld_ra_ri = 1130 cpu_to_le16(BIT(HNS3_TXD_VLD_B)); 1131 1132 trace_hns3_tx_desc(ring, ring->next_to_use); 1133 ring_ptr_move_fw(ring, next_to_use); 1134 return HNS3_LIKELY_BD_NUM; 1135 } 1136 1137 frag_buf_num = hns3_tx_bd_count(size); 1138 sizeoflast = size % HNS3_MAX_BD_SIZE; 1139 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 1140 1141 /* When frag size is bigger than hardware limit, split this frag */ 1142 for (k = 0; k < frag_buf_num; k++) { 1143 /* now, fill the descriptor */ 1144 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); 1145 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? 1146 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); 1147 desc->tx.bdtp_fe_sc_vld_ra_ri = 1148 cpu_to_le16(BIT(HNS3_TXD_VLD_B)); 1149 1150 trace_hns3_tx_desc(ring, ring->next_to_use); 1151 /* move ring pointer to next */ 1152 ring_ptr_move_fw(ring, next_to_use); 1153 1154 desc = &ring->desc[ring->next_to_use]; 1155 } 1156 1157 return frag_buf_num; 1158 } 1159 1160 static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, 1161 unsigned int bd_num) 1162 { 1163 unsigned int size; 1164 int i; 1165 1166 size = skb_headlen(skb); 1167 while (size > HNS3_MAX_BD_SIZE) { 1168 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1169 size -= HNS3_MAX_BD_SIZE; 1170 1171 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1172 return bd_num; 1173 } 1174 1175 if (size) { 1176 bd_size[bd_num++] = size; 1177 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1178 return bd_num; 1179 } 1180 1181 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1182 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1183 size = skb_frag_size(frag); 1184 if (!size) 1185 continue; 1186 1187 while (size > HNS3_MAX_BD_SIZE) { 1188 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1189 size -= HNS3_MAX_BD_SIZE; 1190 1191 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1192 return bd_num; 1193 } 1194 1195 bd_size[bd_num++] = size; 1196 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1197 return bd_num; 1198 } 1199 1200 return bd_num; 1201 } 1202 1203 static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size) 1204 { 1205 struct sk_buff *frag_skb; 1206 unsigned int bd_num = 0; 1207 1208 /* If the total len is within the max bd limit */ 1209 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) && 1210 skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM)) 1211 return skb_shinfo(skb)->nr_frags + 1U; 1212 1213 /* The below case will always be linearized, return 1214 * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized. 
	 */
	if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
		     (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)))
		return HNS3_MAX_TSO_BD_NUM + 1U;

	bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);

	if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
		return bd_num;

	skb_walk_frags(skb, frag_skb) {
		bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	return bd_num;
}

static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
{
	if (!skb->encapsulation)
		return skb_transport_offset(skb) + tcp_hdrlen(skb);

	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}

/* HW requires every continuous block of 8 buffers to hold more data than
 * the MSS. We simplify this by ensuring that skb_headlen plus the first
 * continuous 7 frags are larger than the GSO header len plus the MSS, and
 * that each remaining run of 7 continuous frags is larger than the MSS,
 * except for the last 7 frags.
 */
static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
				     unsigned int bd_num)
{
	unsigned int tot_len = 0;
	int i;

	for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++)
		tot_len += bd_size[i];

	/* ensure the first 8 frags are greater than mss + header */
	if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] <
	    skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
		return true;

	/* ensure every run of 7 continuous buffers is greater than mss
	 * except the last one.
	 */
	for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) {
		tot_len -= bd_size[i];
		tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U];

		if (tot_len < skb_shinfo(skb)->gso_size)
			return true;
	}

	return false;
}

void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
	int i = 0;

	for (i = 0; i < MAX_SKB_FRAGS; i++)
		size[i] = skb_frag_size(&shinfo->frags[i]);
}

static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
				  struct net_device *netdev,
				  struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
	unsigned int bd_num;

	bd_num = hns3_tx_bd_num(skb, bd_size);
	if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
		    !hns3_skb_need_linearized(skb, bd_size, bd_num)) {
			trace_hns3_over_8bd(skb);
			goto out;
		}

		if (__skb_linearize(skb))
			return -ENOMEM;

		bd_num = hns3_tx_bd_count(skb->len);
		if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
		    (!skb_is_gso(skb) &&
		     bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
			trace_hns3_over_8bd(skb);
			return -ENOMEM;
		}

		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_copy++;
		u64_stats_update_end(&ring->syncp);
	}

out:
	if (likely(ring_space(ring) >= bd_num))
		return bd_num;

	netif_stop_subqueue(netdev, ring->queue_index);
	smp_mb(); /* Memory barrier before checking ring_space */

	/* Start the queue in case hns3_clean_tx_ring has just made room
	 * available and has not yet seen the stopped state set by
	 * netif_stop_subqueue above.
1325 */ 1326 if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && 1327 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 1328 netif_start_subqueue(netdev, ring->queue_index); 1329 return bd_num; 1330 } 1331 1332 return -EBUSY; 1333 } 1334 1335 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) 1336 { 1337 struct device *dev = ring_to_dev(ring); 1338 unsigned int i; 1339 1340 for (i = 0; i < ring->desc_num; i++) { 1341 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1342 1343 memset(desc, 0, sizeof(*desc)); 1344 1345 /* check if this is where we started */ 1346 if (ring->next_to_use == next_to_use_orig) 1347 break; 1348 1349 /* rollback one */ 1350 ring_ptr_move_bw(ring, next_to_use); 1351 1352 if (!ring->desc_cb[ring->next_to_use].dma) 1353 continue; 1354 1355 /* unmap the descriptor dma address */ 1356 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB || 1357 ring->desc_cb[ring->next_to_use].type == 1358 DESC_TYPE_FRAGLIST_SKB) 1359 dma_unmap_single(dev, 1360 ring->desc_cb[ring->next_to_use].dma, 1361 ring->desc_cb[ring->next_to_use].length, 1362 DMA_TO_DEVICE); 1363 else if (ring->desc_cb[ring->next_to_use].length) 1364 dma_unmap_page(dev, 1365 ring->desc_cb[ring->next_to_use].dma, 1366 ring->desc_cb[ring->next_to_use].length, 1367 DMA_TO_DEVICE); 1368 1369 ring->desc_cb[ring->next_to_use].length = 0; 1370 ring->desc_cb[ring->next_to_use].dma = 0; 1371 ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; 1372 } 1373 } 1374 1375 static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, 1376 struct sk_buff *skb, enum hns_desc_type type) 1377 { 1378 unsigned int size = skb_headlen(skb); 1379 int i, ret, bd_num = 0; 1380 1381 if (size) { 1382 ret = hns3_fill_desc(ring, skb, size, type); 1383 if (unlikely(ret < 0)) 1384 return ret; 1385 1386 bd_num += ret; 1387 } 1388 1389 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1390 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1391 1392 size = skb_frag_size(frag); 1393 if (!size) 1394 continue; 1395 1396 ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE); 1397 if (unlikely(ret < 0)) 1398 return ret; 1399 1400 bd_num += ret; 1401 } 1402 1403 return bd_num; 1404 } 1405 1406 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 1407 { 1408 struct hns3_nic_priv *priv = netdev_priv(netdev); 1409 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; 1410 struct netdev_queue *dev_queue; 1411 int pre_ntu, next_to_use_head; 1412 struct sk_buff *frag_skb; 1413 int bd_num = 0; 1414 int ret; 1415 1416 /* Hardware can only handle short frames above 32 bytes */ 1417 if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) 1418 return NETDEV_TX_OK; 1419 1420 /* Prefetch the data used later */ 1421 prefetch(skb->data); 1422 1423 ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); 1424 if (unlikely(ret <= 0)) { 1425 if (ret == -EBUSY) { 1426 u64_stats_update_begin(&ring->syncp); 1427 ring->stats.tx_busy++; 1428 u64_stats_update_end(&ring->syncp); 1429 return NETDEV_TX_BUSY; 1430 } else if (ret == -ENOMEM) { 1431 u64_stats_update_begin(&ring->syncp); 1432 ring->stats.sw_err_cnt++; 1433 u64_stats_update_end(&ring->syncp); 1434 } 1435 1436 hns3_rl_err(netdev, "xmit error: %d!\n", ret); 1437 goto out_err_tx_ok; 1438 } 1439 1440 next_to_use_head = ring->next_to_use; 1441 1442 ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); 1443 if (unlikely(ret < 0)) 1444 goto fill_err; 1445 1446 bd_num += ret; 1447 1448 skb_walk_frags(skb, frag_skb) { 1449 ret = hns3_fill_skb_to_desc(ring, 
frag_skb, 1450 DESC_TYPE_FRAGLIST_SKB); 1451 if (unlikely(ret < 0)) 1452 goto fill_err; 1453 1454 bd_num += ret; 1455 } 1456 1457 pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : 1458 (ring->desc_num - 1); 1459 ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= 1460 cpu_to_le16(BIT(HNS3_TXD_FE_B)); 1461 trace_hns3_tx_desc(ring, pre_ntu); 1462 1463 /* Complete translate all packets */ 1464 dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); 1465 netdev_tx_sent_queue(dev_queue, skb->len); 1466 1467 wmb(); /* Commit all data before submit */ 1468 1469 hnae3_queue_xmit(ring->tqp, bd_num); 1470 1471 return NETDEV_TX_OK; 1472 1473 fill_err: 1474 hns3_clear_desc(ring, next_to_use_head); 1475 1476 out_err_tx_ok: 1477 dev_kfree_skb_any(skb); 1478 return NETDEV_TX_OK; 1479 } 1480 1481 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) 1482 { 1483 struct hnae3_handle *h = hns3_get_handle(netdev); 1484 struct sockaddr *mac_addr = p; 1485 int ret; 1486 1487 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) 1488 return -EADDRNOTAVAIL; 1489 1490 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { 1491 netdev_info(netdev, "already using mac address %pM\n", 1492 mac_addr->sa_data); 1493 return 0; 1494 } 1495 1496 /* For VF device, if there is a perm_addr, then the user will not 1497 * be allowed to change the address. 1498 */ 1499 if (!hns3_is_phys_func(h->pdev) && 1500 !is_zero_ether_addr(netdev->perm_addr)) { 1501 netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n", 1502 netdev->perm_addr, mac_addr->sa_data); 1503 return -EPERM; 1504 } 1505 1506 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); 1507 if (ret) { 1508 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); 1509 return ret; 1510 } 1511 1512 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); 1513 1514 return 0; 1515 } 1516 1517 static int hns3_nic_do_ioctl(struct net_device *netdev, 1518 struct ifreq *ifr, int cmd) 1519 { 1520 struct hnae3_handle *h = hns3_get_handle(netdev); 1521 1522 if (!netif_running(netdev)) 1523 return -EINVAL; 1524 1525 if (!h->ae_algo->ops->do_ioctl) 1526 return -EOPNOTSUPP; 1527 1528 return h->ae_algo->ops->do_ioctl(h, ifr, cmd); 1529 } 1530 1531 static int hns3_nic_set_features(struct net_device *netdev, 1532 netdev_features_t features) 1533 { 1534 netdev_features_t changed = netdev->features ^ features; 1535 struct hns3_nic_priv *priv = netdev_priv(netdev); 1536 struct hnae3_handle *h = priv->ae_handle; 1537 bool enable; 1538 int ret; 1539 1540 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { 1541 enable = !!(features & NETIF_F_GRO_HW); 1542 ret = h->ae_algo->ops->set_gro_en(h, enable); 1543 if (ret) 1544 return ret; 1545 } 1546 1547 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && 1548 h->ae_algo->ops->enable_hw_strip_rxvtag) { 1549 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); 1550 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); 1551 if (ret) 1552 return ret; 1553 } 1554 1555 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { 1556 enable = !!(features & NETIF_F_NTUPLE); 1557 h->ae_algo->ops->enable_fd(h, enable); 1558 } 1559 1560 netdev->features = features; 1561 return 0; 1562 } 1563 1564 static netdev_features_t hns3_features_check(struct sk_buff *skb, 1565 struct net_device *dev, 1566 netdev_features_t features) 1567 { 1568 #define HNS3_MAX_HDR_LEN 480U 1569 #define HNS3_MAX_L4_HDR_LEN 60U 1570 1571 size_t len; 1572 1573 if (skb->ip_summed != CHECKSUM_PARTIAL) 1574 return 
features;

	if (skb->encapsulation)
		len = skb_inner_transport_header(skb) - skb->data;
	else
		len = skb_transport_header(skb) - skb->data;

	/* Assume the L4 header is 60 bytes, as TCP is the only protocol
	 * with a flexible header length, and its maximum length is 60
	 * bytes.
	 */
	len += HNS3_MAX_L4_HDR_LEN;

	/* Hardware only supports checksum on the skb with a max header
	 * len of 480 bytes.
	 */
	if (len > HNS3_MAX_HDR_LEN)
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

static void hns3_nic_get_stats64(struct net_device *netdev,
				 struct rtnl_link_stats64 *stats)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hnae3_handle *handle = priv->ae_handle;
	struct hns3_enet_ring *ring;
	u64 rx_length_errors = 0;
	u64 rx_crc_errors = 0;
	u64 rx_multicast = 0;
	unsigned int start;
	u64 tx_errors = 0;
	u64 rx_errors = 0;
	unsigned int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	u64 tx_drop = 0;
	u64 rx_drop = 0;

	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	handle->ae_algo->ops->update_stats(handle, &netdev->stats);

	for (idx = 0; idx < queue_num; idx++) {
		/* fetch the tx stats */
		ring = &priv->ring[idx];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			tx_bytes += ring->stats.tx_bytes;
			tx_pkts += ring->stats.tx_pkts;
			tx_drop += ring->stats.sw_err_cnt;
			tx_drop += ring->stats.tx_vlan_err;
			tx_drop += ring->stats.tx_l4_proto_err;
			tx_drop += ring->stats.tx_l2l3l4_err;
			tx_drop += ring->stats.tx_tso_err;
			tx_errors += ring->stats.sw_err_cnt;
			tx_errors += ring->stats.tx_vlan_err;
			tx_errors += ring->stats.tx_l4_proto_err;
			tx_errors += ring->stats.tx_l2l3l4_err;
			tx_errors += ring->stats.tx_tso_err;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		/* fetch the rx stats */
		ring = &priv->ring[idx + queue_num];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			rx_bytes += ring->stats.rx_bytes;
			rx_pkts += ring->stats.rx_pkts;
			rx_drop += ring->stats.l2_err;
			rx_errors += ring->stats.l2_err;
			rx_errors += ring->stats.l3l4_csum_err;
			rx_crc_errors += ring->stats.l2_err;
			rx_multicast += ring->stats.rx_multicast;
			rx_length_errors += ring->stats.err_pkt_len;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = rx_errors;
	stats->multicast = rx_multicast;
	stats->rx_length_errors = rx_length_errors;
	stats->rx_crc_errors = rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	stats->tx_errors = tx_errors;
	stats->rx_dropped = rx_drop;
	stats->tx_dropped = tx_drop;
	stats->collisions = netdev->stats.collisions;
	stats->rx_over_errors = netdev->stats.rx_over_errors;
	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors =
netdev->stats.tx_heartbeat_errors; 1677 stats->tx_window_errors = netdev->stats.tx_window_errors; 1678 stats->rx_compressed = netdev->stats.rx_compressed; 1679 stats->tx_compressed = netdev->stats.tx_compressed; 1680 } 1681 1682 static int hns3_setup_tc(struct net_device *netdev, void *type_data) 1683 { 1684 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 1685 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; 1686 struct hnae3_knic_private_info *kinfo; 1687 u8 tc = mqprio_qopt->qopt.num_tc; 1688 u16 mode = mqprio_qopt->mode; 1689 u8 hw = mqprio_qopt->qopt.hw; 1690 struct hnae3_handle *h; 1691 1692 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && 1693 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) 1694 return -EOPNOTSUPP; 1695 1696 if (tc > HNAE3_MAX_TC) 1697 return -EINVAL; 1698 1699 if (!netdev) 1700 return -EINVAL; 1701 1702 h = hns3_get_handle(netdev); 1703 kinfo = &h->kinfo; 1704 1705 netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); 1706 1707 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 1708 kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP; 1709 } 1710 1711 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 1712 void *type_data) 1713 { 1714 if (type != TC_SETUP_QDISC_MQPRIO) 1715 return -EOPNOTSUPP; 1716 1717 return hns3_setup_tc(dev, type_data); 1718 } 1719 1720 static int hns3_vlan_rx_add_vid(struct net_device *netdev, 1721 __be16 proto, u16 vid) 1722 { 1723 struct hnae3_handle *h = hns3_get_handle(netdev); 1724 int ret = -EIO; 1725 1726 if (h->ae_algo->ops->set_vlan_filter) 1727 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); 1728 1729 return ret; 1730 } 1731 1732 static int hns3_vlan_rx_kill_vid(struct net_device *netdev, 1733 __be16 proto, u16 vid) 1734 { 1735 struct hnae3_handle *h = hns3_get_handle(netdev); 1736 int ret = -EIO; 1737 1738 if (h->ae_algo->ops->set_vlan_filter) 1739 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); 1740 1741 return ret; 1742 } 1743 1744 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 1745 u8 qos, __be16 vlan_proto) 1746 { 1747 struct hnae3_handle *h = hns3_get_handle(netdev); 1748 int ret = -EIO; 1749 1750 netif_dbg(h, drv, netdev, 1751 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n", 1752 vf, vlan, qos, ntohs(vlan_proto)); 1753 1754 if (h->ae_algo->ops->set_vf_vlan_filter) 1755 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, 1756 qos, vlan_proto); 1757 1758 return ret; 1759 } 1760 1761 static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) 1762 { 1763 struct hnae3_handle *handle = hns3_get_handle(netdev); 1764 1765 if (hns3_nic_resetting(netdev)) 1766 return -EBUSY; 1767 1768 if (!handle->ae_algo->ops->set_vf_spoofchk) 1769 return -EOPNOTSUPP; 1770 1771 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); 1772 } 1773 1774 static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) 1775 { 1776 struct hnae3_handle *handle = hns3_get_handle(netdev); 1777 1778 if (!handle->ae_algo->ops->set_vf_trust) 1779 return -EOPNOTSUPP; 1780 1781 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); 1782 } 1783 1784 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) 1785 { 1786 struct hnae3_handle *h = hns3_get_handle(netdev); 1787 int ret; 1788 1789 if (hns3_nic_resetting(netdev)) 1790 return -EBUSY; 1791 1792 if (!h->ae_algo->ops->set_mtu) 1793 return -EOPNOTSUPP; 1794 1795 netif_dbg(h, drv, netdev, 1796 "change mtu from %u to %d\n", netdev->mtu, 
new_mtu); 1797 1798 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 1799 if (ret) 1800 netdev_err(netdev, "failed to change MTU in hardware %d\n", 1801 ret); 1802 else 1803 netdev->mtu = new_mtu; 1804 1805 return ret; 1806 } 1807 1808 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) 1809 { 1810 struct hns3_nic_priv *priv = netdev_priv(ndev); 1811 struct hnae3_handle *h = hns3_get_handle(ndev); 1812 struct hns3_enet_ring *tx_ring; 1813 struct napi_struct *napi; 1814 int timeout_queue = 0; 1815 int hw_head, hw_tail; 1816 int fbd_num, fbd_oft; 1817 int ebd_num, ebd_oft; 1818 int bd_num, bd_err; 1819 int ring_en, tc; 1820 int i; 1821 1822 /* Find the stopped queue the same way the stack does */ 1823 for (i = 0; i < ndev->num_tx_queues; i++) { 1824 struct netdev_queue *q; 1825 unsigned long trans_start; 1826 1827 q = netdev_get_tx_queue(ndev, i); 1828 trans_start = q->trans_start; 1829 if (netif_xmit_stopped(q) && 1830 time_after(jiffies, 1831 (trans_start + ndev->watchdog_timeo))) { 1832 timeout_queue = i; 1833 netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", 1834 q->state, 1835 jiffies_to_msecs(jiffies - trans_start)); 1836 break; 1837 } 1838 } 1839 1840 if (i == ndev->num_tx_queues) { 1841 netdev_info(ndev, 1842 "no netdev TX timeout queue found, timeout count: %llu\n", 1843 priv->tx_timeout_count); 1844 return false; 1845 } 1846 1847 priv->tx_timeout_count++; 1848 1849 tx_ring = &priv->ring[timeout_queue]; 1850 napi = &tx_ring->tqp_vector->napi; 1851 1852 netdev_info(ndev, 1853 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", 1854 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, 1855 tx_ring->next_to_clean, napi->state); 1856 1857 netdev_info(ndev, 1858 "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n", 1859 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, 1860 tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt); 1861 1862 netdev_info(ndev, 1863 "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n", 1864 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt, 1865 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); 1866 1867 /* When mac received many pause frames continuous, it's unable to send 1868 * packets, which may cause tx timeout 1869 */ 1870 if (h->ae_algo->ops->get_mac_stats) { 1871 struct hns3_mac_stats mac_stats; 1872 1873 h->ae_algo->ops->get_mac_stats(h, &mac_stats); 1874 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", 1875 mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); 1876 } 1877 1878 hw_head = readl_relaxed(tx_ring->tqp->io_base + 1879 HNS3_RING_TX_RING_HEAD_REG); 1880 hw_tail = readl_relaxed(tx_ring->tqp->io_base + 1881 HNS3_RING_TX_RING_TAIL_REG); 1882 fbd_num = readl_relaxed(tx_ring->tqp->io_base + 1883 HNS3_RING_TX_RING_FBDNUM_REG); 1884 fbd_oft = readl_relaxed(tx_ring->tqp->io_base + 1885 HNS3_RING_TX_RING_OFFSET_REG); 1886 ebd_num = readl_relaxed(tx_ring->tqp->io_base + 1887 HNS3_RING_TX_RING_EBDNUM_REG); 1888 ebd_oft = readl_relaxed(tx_ring->tqp->io_base + 1889 HNS3_RING_TX_RING_EBD_OFFSET_REG); 1890 bd_num = readl_relaxed(tx_ring->tqp->io_base + 1891 HNS3_RING_TX_RING_BD_NUM_REG); 1892 bd_err = readl_relaxed(tx_ring->tqp->io_base + 1893 HNS3_RING_TX_RING_BD_ERR_REG); 1894 ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); 1895 tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); 1896 1897 netdev_info(ndev, 1898 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", 1899 bd_num, 
hw_head, hw_tail, bd_err, 1900 readl(tx_ring->tqp_vector->mask_addr)); 1901 netdev_info(ndev, 1902 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", 1903 ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); 1904 1905 return true; 1906 } 1907 1908 static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue) 1909 { 1910 struct hns3_nic_priv *priv = netdev_priv(ndev); 1911 struct hnae3_handle *h = priv->ae_handle; 1912 1913 if (!hns3_get_tx_timeo_queue_info(ndev)) 1914 return; 1915 1916 /* request the reset, and let the hclge to determine 1917 * which reset level should be done 1918 */ 1919 if (h->ae_algo->ops->reset_event) 1920 h->ae_algo->ops->reset_event(h->pdev, h); 1921 } 1922 1923 #ifdef CONFIG_RFS_ACCEL 1924 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 1925 u16 rxq_index, u32 flow_id) 1926 { 1927 struct hnae3_handle *h = hns3_get_handle(dev); 1928 struct flow_keys fkeys; 1929 1930 if (!h->ae_algo->ops->add_arfs_entry) 1931 return -EOPNOTSUPP; 1932 1933 if (skb->encapsulation) 1934 return -EPROTONOSUPPORT; 1935 1936 if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) 1937 return -EPROTONOSUPPORT; 1938 1939 if ((fkeys.basic.n_proto != htons(ETH_P_IP) && 1940 fkeys.basic.n_proto != htons(ETH_P_IPV6)) || 1941 (fkeys.basic.ip_proto != IPPROTO_TCP && 1942 fkeys.basic.ip_proto != IPPROTO_UDP)) 1943 return -EPROTONOSUPPORT; 1944 1945 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); 1946 } 1947 #endif 1948 1949 static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, 1950 struct ifla_vf_info *ivf) 1951 { 1952 struct hnae3_handle *h = hns3_get_handle(ndev); 1953 1954 if (!h->ae_algo->ops->get_vf_config) 1955 return -EOPNOTSUPP; 1956 1957 return h->ae_algo->ops->get_vf_config(h, vf, ivf); 1958 } 1959 1960 static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, 1961 int link_state) 1962 { 1963 struct hnae3_handle *h = hns3_get_handle(ndev); 1964 1965 if (!h->ae_algo->ops->set_vf_link_state) 1966 return -EOPNOTSUPP; 1967 1968 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); 1969 } 1970 1971 static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, 1972 int min_tx_rate, int max_tx_rate) 1973 { 1974 struct hnae3_handle *h = hns3_get_handle(ndev); 1975 1976 if (!h->ae_algo->ops->set_vf_rate) 1977 return -EOPNOTSUPP; 1978 1979 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, 1980 false); 1981 } 1982 1983 static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 1984 { 1985 struct hnae3_handle *h = hns3_get_handle(netdev); 1986 1987 if (!h->ae_algo->ops->set_vf_mac) 1988 return -EOPNOTSUPP; 1989 1990 if (is_multicast_ether_addr(mac)) { 1991 netdev_err(netdev, 1992 "Invalid MAC:%pM specified. 
Could not set MAC\n", 1993 mac); 1994 return -EINVAL; 1995 } 1996 1997 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); 1998 } 1999 2000 static const struct net_device_ops hns3_nic_netdev_ops = { 2001 .ndo_open = hns3_nic_net_open, 2002 .ndo_stop = hns3_nic_net_stop, 2003 .ndo_start_xmit = hns3_nic_net_xmit, 2004 .ndo_tx_timeout = hns3_nic_net_timeout, 2005 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 2006 .ndo_do_ioctl = hns3_nic_do_ioctl, 2007 .ndo_change_mtu = hns3_nic_change_mtu, 2008 .ndo_set_features = hns3_nic_set_features, 2009 .ndo_features_check = hns3_features_check, 2010 .ndo_get_stats64 = hns3_nic_get_stats64, 2011 .ndo_setup_tc = hns3_nic_setup_tc, 2012 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 2013 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 2014 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 2015 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 2016 .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, 2017 .ndo_set_vf_trust = hns3_set_vf_trust, 2018 #ifdef CONFIG_RFS_ACCEL 2019 .ndo_rx_flow_steer = hns3_rx_flow_steer, 2020 #endif 2021 .ndo_get_vf_config = hns3_nic_get_vf_config, 2022 .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, 2023 .ndo_set_vf_rate = hns3_nic_set_vf_rate, 2024 .ndo_set_vf_mac = hns3_nic_set_vf_mac, 2025 }; 2026 2027 bool hns3_is_phys_func(struct pci_dev *pdev) 2028 { 2029 u32 dev_id = pdev->device; 2030 2031 switch (dev_id) { 2032 case HNAE3_DEV_ID_GE: 2033 case HNAE3_DEV_ID_25GE: 2034 case HNAE3_DEV_ID_25GE_RDMA: 2035 case HNAE3_DEV_ID_25GE_RDMA_MACSEC: 2036 case HNAE3_DEV_ID_50GE_RDMA: 2037 case HNAE3_DEV_ID_50GE_RDMA_MACSEC: 2038 case HNAE3_DEV_ID_100G_RDMA_MACSEC: 2039 return true; 2040 case HNAE3_DEV_ID_100G_VF: 2041 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: 2042 return false; 2043 default: 2044 dev_warn(&pdev->dev, "un-recognized pci device-id %u", 2045 dev_id); 2046 } 2047 2048 return false; 2049 } 2050 2051 static void hns3_disable_sriov(struct pci_dev *pdev) 2052 { 2053 /* If our VFs are assigned we cannot shut down SR-IOV 2054 * without causing issues, so just leave the hardware 2055 * available but disabled 2056 */ 2057 if (pci_vfs_assigned(pdev)) { 2058 dev_warn(&pdev->dev, 2059 "disabling driver while VFs are assigned\n"); 2060 return; 2061 } 2062 2063 pci_disable_sriov(pdev); 2064 } 2065 2066 static void hns3_get_dev_capability(struct pci_dev *pdev, 2067 struct hnae3_ae_dev *ae_dev) 2068 { 2069 if (pdev->revision >= 0x21) { 2070 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1); 2071 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1); 2072 } 2073 } 2074 2075 /* hns3_probe - Device initialization routine 2076 * @pdev: PCI device information struct 2077 * @ent: entry in hns3_pci_tbl 2078 * 2079 * hns3_probe initializes a PF identified by a pci_dev structure. 2080 * The OS initialization, configuring of the PF private structure, 2081 * and a hardware reset occur. 
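 * Registering the ae_dev with the hnae3 framework below is what later brings up the enet client for this device (see hns3_client_init()).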
2082 * 2083 * Returns 0 on success, negative on failure 2084 */ 2085 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2086 { 2087 struct hnae3_ae_dev *ae_dev; 2088 int ret; 2089 2090 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); 2091 if (!ae_dev) 2092 return -ENOMEM; 2093 2094 ae_dev->pdev = pdev; 2095 ae_dev->flag = ent->driver_data; 2096 hns3_get_dev_capability(pdev, ae_dev); 2097 pci_set_drvdata(pdev, ae_dev); 2098 2099 ret = hnae3_register_ae_dev(ae_dev); 2100 if (ret) 2101 pci_set_drvdata(pdev, NULL); 2102 2103 return ret; 2104 } 2105 2106 /* hns3_remove - Device removal routine 2107 * @pdev: PCI device information struct 2108 */ 2109 static void hns3_remove(struct pci_dev *pdev) 2110 { 2111 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2112 2113 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) 2114 hns3_disable_sriov(pdev); 2115 2116 hnae3_unregister_ae_dev(ae_dev); 2117 pci_set_drvdata(pdev, NULL); 2118 } 2119 2120 /** 2121 * hns3_pci_sriov_configure 2122 * @pdev: pointer to a pci_dev structure 2123 * @num_vfs: number of VFs to allocate 2124 * 2125 * Enable or change the number of VFs. Called when the user updates the number 2126 * of VFs in sysfs. 2127 **/ 2128 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 2129 { 2130 int ret; 2131 2132 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { 2133 dev_warn(&pdev->dev, "Can not config SRIOV\n"); 2134 return -EINVAL; 2135 } 2136 2137 if (num_vfs) { 2138 ret = pci_enable_sriov(pdev, num_vfs); 2139 if (ret) 2140 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); 2141 else 2142 return num_vfs; 2143 } else if (!pci_vfs_assigned(pdev)) { 2144 pci_disable_sriov(pdev); 2145 } else { 2146 dev_warn(&pdev->dev, 2147 "Unable to free VFs because some are assigned to VMs.\n"); 2148 } 2149 2150 return 0; 2151 } 2152 2153 static void hns3_shutdown(struct pci_dev *pdev) 2154 { 2155 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2156 2157 hnae3_unregister_ae_dev(ae_dev); 2158 pci_set_drvdata(pdev, NULL); 2159 2160 if (system_state == SYSTEM_POWER_OFF) 2161 pci_set_power_state(pdev, PCI_D3hot); 2162 } 2163 2164 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, 2165 pci_channel_state_t state) 2166 { 2167 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2168 pci_ers_result_t ret; 2169 2170 dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); 2171 2172 if (state == pci_channel_io_perm_failure) 2173 return PCI_ERS_RESULT_DISCONNECT; 2174 2175 if (!ae_dev || !ae_dev->ops) { 2176 dev_err(&pdev->dev, 2177 "Can't recover - error happened before device initialized\n"); 2178 return PCI_ERS_RESULT_NONE; 2179 } 2180 2181 if (ae_dev->ops->handle_hw_ras_error) 2182 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); 2183 else 2184 return PCI_ERS_RESULT_NONE; 2185 2186 return ret; 2187 } 2188 2189 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) 2190 { 2191 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2192 const struct hnae3_ae_ops *ops; 2193 enum hnae3_reset_type reset_type; 2194 struct device *dev = &pdev->dev; 2195 2196 if (!ae_dev || !ae_dev->ops) 2197 return PCI_ERS_RESULT_NONE; 2198 2199 ops = ae_dev->ops; 2200 /* request the reset */ 2201 if (ops->reset_event && ops->get_reset_level && 2202 ops->set_default_reset_request) { 2203 if (ae_dev->hw_err_reset_req) { 2204 reset_type = ops->get_reset_level(ae_dev, 2205 &ae_dev->hw_err_reset_req); 2206 ops->set_default_reset_request(ae_dev, reset_type); 2207 
dev_info(dev, "requesting reset due to PCI error\n"); 2208 ops->reset_event(pdev, NULL); 2209 } 2210 2211 return PCI_ERS_RESULT_RECOVERED; 2212 } 2213 2214 return PCI_ERS_RESULT_DISCONNECT; 2215 } 2216 2217 static void hns3_reset_prepare(struct pci_dev *pdev) 2218 { 2219 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2220 2221 dev_info(&pdev->dev, "FLR prepare\n"); 2222 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) 2223 ae_dev->ops->flr_prepare(ae_dev); 2224 } 2225 2226 static void hns3_reset_done(struct pci_dev *pdev) 2227 { 2228 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2229 2230 dev_info(&pdev->dev, "FLR done\n"); 2231 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) 2232 ae_dev->ops->flr_done(ae_dev); 2233 } 2234 2235 static const struct pci_error_handlers hns3_err_handler = { 2236 .error_detected = hns3_error_detected, 2237 .slot_reset = hns3_slot_reset, 2238 .reset_prepare = hns3_reset_prepare, 2239 .reset_done = hns3_reset_done, 2240 }; 2241 2242 static struct pci_driver hns3_driver = { 2243 .name = hns3_driver_name, 2244 .id_table = hns3_pci_tbl, 2245 .probe = hns3_probe, 2246 .remove = hns3_remove, 2247 .shutdown = hns3_shutdown, 2248 .sriov_configure = hns3_pci_sriov_configure, 2249 .err_handler = &hns3_err_handler, 2250 }; 2251 2252 /* set default feature to hns3 */ 2253 static void hns3_set_default_feature(struct net_device *netdev) 2254 { 2255 struct hnae3_handle *h = hns3_get_handle(netdev); 2256 struct pci_dev *pdev = h->pdev; 2257 2258 netdev->priv_flags |= IFF_UNICAST_FLT; 2259 2260 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2261 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2262 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2263 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2264 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | 2265 NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; 2266 2267 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 2268 2269 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2270 NETIF_F_HW_VLAN_CTAG_FILTER | 2271 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2272 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2273 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2274 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2275 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | 2276 NETIF_F_FRAGLIST; 2277 2278 netdev->vlan_features |= 2279 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | 2280 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | 2281 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2282 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2283 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | 2284 NETIF_F_FRAGLIST; 2285 2286 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2287 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2288 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2289 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2290 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2291 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | 2292 NETIF_F_FRAGLIST; 2293 2294 if (pdev->revision >= 0x21) { 2295 netdev->hw_features |= NETIF_F_GRO_HW; 2296 netdev->features |= NETIF_F_GRO_HW; 2297 2298 if (!(h->flags & HNAE3_SUPPORT_VF)) { 2299 netdev->hw_features |= NETIF_F_NTUPLE; 2300 netdev->features |= NETIF_F_NTUPLE; 2301 } 2302 } 2303 } 2304 2305 static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 2306 struct hns3_desc_cb *cb) 2307 { 2308 unsigned int order = hns3_page_order(ring); 2309 struct page *p; 2310 2311 p = dev_alloc_pages(order); 2312 if 
(!p) 2313 return -ENOMEM; 2314 2315 cb->priv = p; 2316 cb->page_offset = 0; 2317 cb->reuse_flag = 0; 2318 cb->buf = page_address(p); 2319 cb->length = hns3_page_size(ring); 2320 cb->type = DESC_TYPE_PAGE; 2321 2322 return 0; 2323 } 2324 2325 static void hns3_free_buffer(struct hns3_enet_ring *ring, 2326 struct hns3_desc_cb *cb) 2327 { 2328 if (cb->type == DESC_TYPE_SKB) 2329 dev_kfree_skb_any((struct sk_buff *)cb->priv); 2330 else if (!HNAE3_IS_TX_RING(ring)) 2331 put_page((struct page *)cb->priv); 2332 memset(cb, 0, sizeof(*cb)); 2333 } 2334 2335 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) 2336 { 2337 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 2338 cb->length, ring_to_dma_dir(ring)); 2339 2340 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) 2341 return -EIO; 2342 2343 return 0; 2344 } 2345 2346 static void hns3_unmap_buffer(struct hns3_enet_ring *ring, 2347 struct hns3_desc_cb *cb) 2348 { 2349 if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB) 2350 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 2351 ring_to_dma_dir(ring)); 2352 else if (cb->length) 2353 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 2354 ring_to_dma_dir(ring)); 2355 } 2356 2357 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) 2358 { 2359 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2360 ring->desc[i].addr = 0; 2361 } 2362 2363 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) 2364 { 2365 struct hns3_desc_cb *cb = &ring->desc_cb[i]; 2366 2367 if (!ring->desc_cb[i].dma) 2368 return; 2369 2370 hns3_buffer_detach(ring, i); 2371 hns3_free_buffer(ring, cb); 2372 } 2373 2374 static void hns3_free_buffers(struct hns3_enet_ring *ring) 2375 { 2376 int i; 2377 2378 for (i = 0; i < ring->desc_num; i++) 2379 hns3_free_buffer_detach(ring, i); 2380 } 2381 2382 /* free desc along with its attached buffer */ 2383 static void hns3_free_desc(struct hns3_enet_ring *ring) 2384 { 2385 int size = ring->desc_num * sizeof(ring->desc[0]); 2386 2387 hns3_free_buffers(ring); 2388 2389 if (ring->desc) { 2390 dma_free_coherent(ring_to_dev(ring), size, 2391 ring->desc, ring->desc_dma_addr); 2392 ring->desc = NULL; 2393 } 2394 } 2395 2396 static int hns3_alloc_desc(struct hns3_enet_ring *ring) 2397 { 2398 int size = ring->desc_num * sizeof(ring->desc[0]); 2399 2400 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, 2401 &ring->desc_dma_addr, GFP_KERNEL); 2402 if (!ring->desc) 2403 return -ENOMEM; 2404 2405 return 0; 2406 } 2407 2408 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, 2409 struct hns3_desc_cb *cb) 2410 { 2411 int ret; 2412 2413 ret = hns3_alloc_buffer(ring, cb); 2414 if (ret) 2415 goto out; 2416 2417 ret = hns3_map_buffer(ring, cb); 2418 if (ret) 2419 goto out_with_buf; 2420 2421 return 0; 2422 2423 out_with_buf: 2424 hns3_free_buffer(ring, cb); 2425 out: 2426 return ret; 2427 } 2428 2429 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) 2430 { 2431 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); 2432 2433 if (ret) 2434 return ret; 2435 2436 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2437 2438 return 0; 2439 } 2440 2441 /* Allocate memory for raw pkg, and map with dma */ 2442 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) 2443 { 2444 int i, j, ret; 2445 2446 for (i = 0; i < ring->desc_num; i++) { 2447 ret = hns3_alloc_and_attach_buffer(ring, i); 2448 if (ret) 2449 goto out_buffer_fail; 2450 } 2451 2452 return 0; 
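 /* error path below: detach and free the buffers that were attached before the failure */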
2453 2454 out_buffer_fail: 2455 for (j = i - 1; j >= 0; j--) 2456 hns3_free_buffer_detach(ring, j); 2457 return ret; 2458 } 2459 2460 /* detach an in-use buffer and replace it with a reserved one */ 2461 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, 2462 struct hns3_desc_cb *res_cb) 2463 { 2464 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2465 ring->desc_cb[i] = *res_cb; 2466 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2467 ring->desc[i].rx.bd_base_info = 0; 2468 } 2469 2470 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) 2471 { 2472 ring->desc_cb[i].reuse_flag = 0; 2473 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + 2474 ring->desc_cb[i].page_offset); 2475 ring->desc[i].rx.bd_base_info = 0; 2476 2477 dma_sync_single_for_device(ring_to_dev(ring), 2478 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, 2479 hns3_buf_size(ring), 2480 DMA_FROM_DEVICE); 2481 } 2482 2483 static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head, 2484 int *bytes, int *pkts) 2485 { 2486 int ntc = ring->next_to_clean; 2487 struct hns3_desc_cb *desc_cb; 2488 2489 while (head != ntc) { 2490 desc_cb = &ring->desc_cb[ntc]; 2491 (*pkts) += (desc_cb->type == DESC_TYPE_SKB); 2492 (*bytes) += desc_cb->length; 2493 /* desc_cb will be cleaned after hns3_free_buffer_detach */ 2494 hns3_free_buffer_detach(ring, ntc); 2495 2496 if (++ntc == ring->desc_num) 2497 ntc = 0; 2498 2499 /* Issue prefetch for next Tx descriptor */ 2500 prefetch(&ring->desc_cb[ntc]); 2501 } 2502 2503 /* This smp_store_release() pairs with smp_load_acquire() in 2504 * ring_space called by hns3_nic_net_xmit. 2505 */ 2506 smp_store_release(&ring->next_to_clean, ntc); 2507 } 2508 2509 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) 2510 { 2511 int u = ring->next_to_use; 2512 int c = ring->next_to_clean; 2513 2514 if (unlikely(h > ring->desc_num)) 2515 return 0; 2516 2517 return u > c ? (h > c && h <= u) : (h > c || h <= u); 2518 } 2519 2520 void hns3_clean_tx_ring(struct hns3_enet_ring *ring) 2521 { 2522 struct net_device *netdev = ring_to_netdev(ring); 2523 struct hns3_nic_priv *priv = netdev_priv(netdev); 2524 struct netdev_queue *dev_queue; 2525 int bytes, pkts; 2526 int head; 2527 2528 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); 2529 2530 if (is_ring_empty(ring) || head == ring->next_to_clean) 2531 return; /* no data to poll */ 2532 2533 rmb(); /* Make sure head is ready before touching any data */ 2534 2535 if (unlikely(!is_valid_clean_head(ring, head))) { 2536 hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head, 2537 ring->next_to_use, ring->next_to_clean); 2538 2539 u64_stats_update_begin(&ring->syncp); 2540 ring->stats.io_err_cnt++; 2541 u64_stats_update_end(&ring->syncp); 2542 return; 2543 } 2544 2545 bytes = 0; 2546 pkts = 0; 2547 hns3_nic_reclaim_desc(ring, head, &bytes, &pkts); 2548 2549 ring->tqp_vector->tx_group.total_bytes += bytes; 2550 ring->tqp_vector->tx_group.total_packets += pkts; 2551 2552 u64_stats_update_begin(&ring->syncp); 2553 ring->stats.tx_bytes += bytes; 2554 ring->stats.tx_pkts += pkts; 2555 u64_stats_update_end(&ring->syncp); 2556 2557 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 2558 netdev_tx_completed_queue(dev_queue, pkts, bytes); 2559 2560 if (unlikely(netif_carrier_ok(netdev) && 2561 ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { 2562 /* Make sure that anybody stopping the queue after this 2563 * sees the new next_to_clean.
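 * The smp_mb() below pairs with the queue-stop check in the xmit path: either the sender sees the space we just freed, or we see the stopped queue and wake it here.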
2564 */ 2565 smp_mb(); 2566 if (netif_tx_queue_stopped(dev_queue) && 2567 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 2568 netif_tx_wake_queue(dev_queue); 2569 ring->stats.restart_queue++; 2570 } 2571 } 2572 } 2573 2574 static int hns3_desc_unused(struct hns3_enet_ring *ring) 2575 { 2576 int ntc = ring->next_to_clean; 2577 int ntu = ring->next_to_use; 2578 2579 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; 2580 } 2581 2582 static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, 2583 int cleaned_count) 2584 { 2585 struct hns3_desc_cb *desc_cb; 2586 struct hns3_desc_cb res_cbs; 2587 int i, ret; 2588 2589 for (i = 0; i < cleaned_count; i++) { 2590 desc_cb = &ring->desc_cb[ring->next_to_use]; 2591 if (desc_cb->reuse_flag) { 2592 u64_stats_update_begin(&ring->syncp); 2593 ring->stats.reuse_pg_cnt++; 2594 u64_stats_update_end(&ring->syncp); 2595 2596 hns3_reuse_buffer(ring, ring->next_to_use); 2597 } else { 2598 ret = hns3_alloc_and_map_buffer(ring, &res_cbs); 2599 if (ret) { 2600 u64_stats_update_begin(&ring->syncp); 2601 ring->stats.sw_err_cnt++; 2602 u64_stats_update_end(&ring->syncp); 2603 2604 hns3_rl_err(ring_to_netdev(ring), 2605 "alloc rx buffer failed: %d\n", 2606 ret); 2607 break; 2608 } 2609 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 2610 2611 u64_stats_update_begin(&ring->syncp); 2612 ring->stats.non_reuse_pg++; 2613 u64_stats_update_end(&ring->syncp); 2614 } 2615 2616 ring_ptr_move_fw(ring, next_to_use); 2617 } 2618 2619 wmb(); /* Make sure all data has been written before submitting */ 2620 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 2621 } 2622 2623 static bool hns3_page_is_reusable(struct page *page) 2624 { 2625 return page_to_nid(page) == numa_mem_id() && 2626 !page_is_pfmemalloc(page); 2627 } 2628 2629 static void hns3_nic_reuse_page(struct sk_buff *skb, int i, 2630 struct hns3_enet_ring *ring, int pull_len, 2631 struct hns3_desc_cb *desc_cb) 2632 { 2633 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 2634 int size = le16_to_cpu(desc->rx.size); 2635 u32 truesize = hns3_buf_size(ring); 2636 2637 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 2638 size - pull_len, truesize); 2639 2640 /* Avoid re-using remote pages, or pages the stack is still using 2641 * when page_offset rolls back to zero; the flag defaults to no reuse 2642 */ 2643 if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) || 2644 (!desc_cb->page_offset && page_count(desc_cb->priv) > 1)) 2645 return; 2646 2647 /* Move offset up to the next buffer */ 2648 desc_cb->page_offset += truesize; 2649 2650 if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) { 2651 desc_cb->reuse_flag = 1; 2652 /* Bump ref count on page before it is given */ 2653 get_page(desc_cb->priv); 2654 } else if (page_count(desc_cb->priv) == 1) { 2655 desc_cb->reuse_flag = 1; 2656 desc_cb->page_offset = 0; 2657 get_page(desc_cb->priv); 2658 } 2659 } 2660 2661 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) 2662 { 2663 __be16 type = skb->protocol; 2664 struct tcphdr *th; 2665 int depth = 0; 2666 2667 while (eth_type_vlan(type)) { 2668 struct vlan_hdr *vh; 2669 2670 if ((depth + VLAN_HLEN) > skb_headlen(skb)) 2671 return -EFAULT; 2672 2673 vh = (struct vlan_hdr *)(skb->data + depth); 2674 type = vh->h_vlan_encapsulated_proto; 2675 depth += VLAN_HLEN; 2676 } 2677 2678 skb_set_network_header(skb, depth); 2679 2680 if (type == htons(ETH_P_IP)) { 2681 const struct iphdr *iph = ip_hdr(skb); 2682 2683 depth += sizeof(struct iphdr); 2684
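 /* redo the TCP pseudo-header checksum below so the stack can finish checksumming the aggregated skb as CHECKSUM_PARTIAL */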
skb_set_transport_header(skb, depth); 2685 th = tcp_hdr(skb); 2686 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, 2687 iph->daddr, 0); 2688 } else if (type == htons(ETH_P_IPV6)) { 2689 const struct ipv6hdr *iph = ipv6_hdr(skb); 2690 2691 depth += sizeof(struct ipv6hdr); 2692 skb_set_transport_header(skb, depth); 2693 th = tcp_hdr(skb); 2694 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, 2695 &iph->daddr, 0); 2696 } else { 2697 hns3_rl_err(skb->dev, 2698 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", 2699 be16_to_cpu(type), depth); 2700 return -EFAULT; 2701 } 2702 2703 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 2704 if (th->cwr) 2705 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 2706 2707 if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) 2708 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; 2709 2710 skb->csum_start = (unsigned char *)th - skb->head; 2711 skb->csum_offset = offsetof(struct tcphdr, check); 2712 skb->ip_summed = CHECKSUM_PARTIAL; 2713 2714 trace_hns3_gro(skb); 2715 2716 return 0; 2717 } 2718 2719 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, 2720 u32 l234info, u32 bd_base_info, u32 ol_info) 2721 { 2722 struct net_device *netdev = ring_to_netdev(ring); 2723 int l3_type, l4_type; 2724 int ol4_type; 2725 2726 skb->ip_summed = CHECKSUM_NONE; 2727 2728 skb_checksum_none_assert(skb); 2729 2730 if (!(netdev->features & NETIF_F_RXCSUM)) 2731 return; 2732 2733 /* check if hardware has done checksum */ 2734 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) 2735 return; 2736 2737 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | 2738 BIT(HNS3_RXD_OL3E_B) | 2739 BIT(HNS3_RXD_OL4E_B)))) { 2740 u64_stats_update_begin(&ring->syncp); 2741 ring->stats.l3l4_csum_err++; 2742 u64_stats_update_end(&ring->syncp); 2743 2744 return; 2745 } 2746 2747 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, 2748 HNS3_RXD_OL4ID_S); 2749 switch (ol4_type) { 2750 case HNS3_OL4_TYPE_MAC_IN_UDP: 2751 case HNS3_OL4_TYPE_NVGRE: 2752 skb->csum_level = 1; 2753 /* fall through */ 2754 case HNS3_OL4_TYPE_NO_TUN: 2755 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, 2756 HNS3_RXD_L3ID_S); 2757 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, 2758 HNS3_RXD_L4ID_S); 2759 2760 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ 2761 if ((l3_type == HNS3_L3_TYPE_IPV4 || 2762 l3_type == HNS3_L3_TYPE_IPV6) && 2763 (l4_type == HNS3_L4_TYPE_UDP || 2764 l4_type == HNS3_L4_TYPE_TCP || 2765 l4_type == HNS3_L4_TYPE_SCTP)) 2766 skb->ip_summed = CHECKSUM_UNNECESSARY; 2767 break; 2768 default: 2769 break; 2770 } 2771 } 2772 2773 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) 2774 { 2775 if (skb_has_frag_list(skb)) 2776 napi_gro_flush(&ring->tqp_vector->napi, false); 2777 2778 napi_gro_receive(&ring->tqp_vector->napi, skb); 2779 } 2780 2781 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, 2782 struct hns3_desc *desc, u32 l234info, 2783 u16 *vlan_tag) 2784 { 2785 struct hnae3_handle *handle = ring->tqp->handle; 2786 struct pci_dev *pdev = ring->tqp->handle->pdev; 2787 2788 if (pdev->revision == 0x20) { 2789 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2790 if (!(*vlan_tag & VLAN_VID_MASK)) 2791 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2792 2793 return (*vlan_tag != 0); 2794 } 2795 2796 #define HNS3_STRP_OUTER_VLAN 0x1 2797 #define HNS3_STRP_INNER_VLAN 0x2 2798 #define HNS3_STRP_BOTH 0x3 2799 2800 /* Hardware always insert VLAN tag into RX descriptor when 2801 * remove the tag from packet, 
driver needs to determine 2802 * reporting which tag to stack. 2803 */ 2804 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, 2805 HNS3_RXD_STRP_TAGP_S)) { 2806 case HNS3_STRP_OUTER_VLAN: 2807 if (handle->port_base_vlan_state != 2808 HNAE3_PORT_BASE_VLAN_DISABLE) 2809 return false; 2810 2811 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2812 return true; 2813 case HNS3_STRP_INNER_VLAN: 2814 if (handle->port_base_vlan_state != 2815 HNAE3_PORT_BASE_VLAN_DISABLE) 2816 return false; 2817 2818 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2819 return true; 2820 case HNS3_STRP_BOTH: 2821 if (handle->port_base_vlan_state == 2822 HNAE3_PORT_BASE_VLAN_DISABLE) 2823 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2824 else 2825 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2826 2827 return true; 2828 default: 2829 return false; 2830 } 2831 } 2832 2833 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, 2834 unsigned char *va) 2835 { 2836 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; 2837 struct net_device *netdev = ring_to_netdev(ring); 2838 struct sk_buff *skb; 2839 2840 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); 2841 skb = ring->skb; 2842 if (unlikely(!skb)) { 2843 hns3_rl_err(netdev, "alloc rx skb fail\n"); 2844 2845 u64_stats_update_begin(&ring->syncp); 2846 ring->stats.sw_err_cnt++; 2847 u64_stats_update_end(&ring->syncp); 2848 2849 return -ENOMEM; 2850 } 2851 2852 trace_hns3_rx_desc(ring); 2853 prefetchw(skb->data); 2854 2855 ring->pending_buf = 1; 2856 ring->frag_num = 0; 2857 ring->tail_skb = NULL; 2858 if (length <= HNS3_RX_HEAD_SIZE) { 2859 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 2860 2861 /* We can reuse buffer as-is, just make sure it is local */ 2862 if (likely(hns3_page_is_reusable(desc_cb->priv))) 2863 desc_cb->reuse_flag = 1; 2864 else /* This page cannot be reused so discard it */ 2865 put_page(desc_cb->priv); 2866 2867 ring_ptr_move_fw(ring, next_to_clean); 2868 return 0; 2869 } 2870 u64_stats_update_begin(&ring->syncp); 2871 ring->stats.seg_pkt_cnt++; 2872 u64_stats_update_end(&ring->syncp); 2873 2874 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); 2875 __skb_put(skb, ring->pull_len); 2876 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, 2877 desc_cb); 2878 ring_ptr_move_fw(ring, next_to_clean); 2879 2880 return 0; 2881 } 2882 2883 static int hns3_add_frag(struct hns3_enet_ring *ring) 2884 { 2885 struct sk_buff *skb = ring->skb; 2886 struct sk_buff *head_skb = skb; 2887 struct sk_buff *new_skb; 2888 struct hns3_desc_cb *desc_cb; 2889 struct hns3_desc *desc; 2890 u32 bd_base_info; 2891 2892 do { 2893 desc = &ring->desc[ring->next_to_clean]; 2894 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2895 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2896 /* make sure HW write desc complete */ 2897 dma_rmb(); 2898 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) 2899 return -ENXIO; 2900 2901 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { 2902 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); 2903 if (unlikely(!new_skb)) { 2904 hns3_rl_err(ring_to_netdev(ring), 2905 "alloc rx fraglist skb fail\n"); 2906 return -ENXIO; 2907 } 2908 ring->frag_num = 0; 2909 2910 if (ring->tail_skb) { 2911 ring->tail_skb->next = new_skb; 2912 ring->tail_skb = new_skb; 2913 } else { 2914 skb_shinfo(skb)->frag_list = new_skb; 2915 ring->tail_skb = new_skb; 2916 } 2917 } 2918 2919 if (ring->tail_skb) { 2920 head_skb->truesize += hns3_buf_size(ring); 2921 head_skb->data_len += 
le16_to_cpu(desc->rx.size); 2922 head_skb->len += le16_to_cpu(desc->rx.size); 2923 skb = ring->tail_skb; 2924 } 2925 2926 dma_sync_single_for_cpu(ring_to_dev(ring), 2927 desc_cb->dma + desc_cb->page_offset, 2928 hns3_buf_size(ring), 2929 DMA_FROM_DEVICE); 2930 2931 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); 2932 trace_hns3_rx_desc(ring); 2933 ring_ptr_move_fw(ring, next_to_clean); 2934 ring->pending_buf++; 2935 } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); 2936 2937 return 0; 2938 } 2939 2940 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, 2941 struct sk_buff *skb, u32 l234info, 2942 u32 bd_base_info, u32 ol_info) 2943 { 2944 u32 l3_type; 2945 2946 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, 2947 HNS3_RXD_GRO_SIZE_M, 2948 HNS3_RXD_GRO_SIZE_S); 2949 /* if there is no HW GRO, do not set gro params */ 2950 if (!skb_shinfo(skb)->gso_size) { 2951 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info); 2952 return 0; 2953 } 2954 2955 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, 2956 HNS3_RXD_GRO_COUNT_M, 2957 HNS3_RXD_GRO_COUNT_S); 2958 2959 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); 2960 if (l3_type == HNS3_L3_TYPE_IPV4) 2961 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 2962 else if (l3_type == HNS3_L3_TYPE_IPV6) 2963 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 2964 else 2965 return -EFAULT; 2966 2967 return hns3_gro_complete(skb, l234info); 2968 } 2969 2970 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, 2971 struct sk_buff *skb, u32 rss_hash) 2972 { 2973 struct hnae3_handle *handle = ring->tqp->handle; 2974 enum pkt_hash_types rss_type; 2975 2976 if (rss_hash) 2977 rss_type = handle->kinfo.rss_type; 2978 else 2979 rss_type = PKT_HASH_TYPE_NONE; 2980 2981 skb_set_hash(skb, rss_hash, rss_type); 2982 } 2983 2984 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) 2985 { 2986 struct net_device *netdev = ring_to_netdev(ring); 2987 enum hns3_pkt_l2t_type l2_frame_type; 2988 u32 bd_base_info, l234info, ol_info; 2989 struct hns3_desc *desc; 2990 unsigned int len; 2991 int pre_ntc, ret; 2992 2993 /* bdinfo handled below is only valid on the last BD of the 2994 * current packet, and ring->next_to_clean indicates the first 2995 * descriptor of next packet, so need - 1 below. 2996 */ 2997 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : 2998 (ring->desc_num - 1); 2999 desc = &ring->desc[pre_ntc]; 3000 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3001 l234info = le32_to_cpu(desc->rx.l234_info); 3002 ol_info = le32_to_cpu(desc->rx.ol_info); 3003 3004 /* Based on hw strategy, the tag offloaded will be stored at 3005 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 3006 * in one layer tag case. 
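 * hns3_parse_vlan_tag() below picks which tag to report to the stack based on the strip status in the RX descriptor.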
3007 */ 3008 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 3009 u16 vlan_tag; 3010 3011 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) 3012 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3013 vlan_tag); 3014 } 3015 3016 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | 3017 BIT(HNS3_RXD_L2E_B))))) { 3018 u64_stats_update_begin(&ring->syncp); 3019 if (l234info & BIT(HNS3_RXD_L2E_B)) 3020 ring->stats.l2_err++; 3021 else 3022 ring->stats.err_pkt_len++; 3023 u64_stats_update_end(&ring->syncp); 3024 3025 return -EFAULT; 3026 } 3027 3028 len = skb->len; 3029 3030 /* set skb->protocol before handing the packet up to the IP stack */ 3031 skb->protocol = eth_type_trans(skb, netdev); 3032 3033 /* This is needed in order to enable forwarding support */ 3034 ret = hns3_set_gro_and_checksum(ring, skb, l234info, 3035 bd_base_info, ol_info); 3036 if (unlikely(ret)) { 3037 u64_stats_update_begin(&ring->syncp); 3038 ring->stats.rx_err_cnt++; 3039 u64_stats_update_end(&ring->syncp); 3040 return ret; 3041 } 3042 3043 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, 3044 HNS3_RXD_DMAC_S); 3045 3046 u64_stats_update_begin(&ring->syncp); 3047 ring->stats.rx_pkts++; 3048 ring->stats.rx_bytes += len; 3049 3050 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) 3051 ring->stats.rx_multicast++; 3052 3053 u64_stats_update_end(&ring->syncp); 3054 3055 ring->tqp_vector->rx_group.total_bytes += len; 3056 3057 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); 3058 return 0; 3059 } 3060 3061 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) 3062 { 3063 struct sk_buff *skb = ring->skb; 3064 struct hns3_desc_cb *desc_cb; 3065 struct hns3_desc *desc; 3066 unsigned int length; 3067 u32 bd_base_info; 3068 int ret; 3069 3070 desc = &ring->desc[ring->next_to_clean]; 3071 desc_cb = &ring->desc_cb[ring->next_to_clean]; 3072 3073 prefetch(desc); 3074 3075 length = le16_to_cpu(desc->rx.size); 3076 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3077 3078 /* Check valid BD */ 3079 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) 3080 return -ENXIO; 3081 3082 if (!skb) { 3083 ring->va = desc_cb->buf + desc_cb->page_offset; 3084 3085 dma_sync_single_for_cpu(ring_to_dev(ring), 3086 desc_cb->dma + desc_cb->page_offset, 3087 hns3_buf_size(ring), 3088 DMA_FROM_DEVICE); 3089 } 3090 3091 /* Prefetch the first cache line of the first page. 3092 * The idea is to cache a few bytes of the packet header. Our L1 cache 3093 * line size is 64B, so we need to prefetch twice to cover 128B. But 3094 * CPUs may have larger caches with 128B level 1 cache lines; in 3095 * that case a single prefetch is enough to pull in the relevant 3096 * part of the header.
3097 */ 3098 prefetch(ring->va); 3099 #if L1_CACHE_BYTES < 128 3100 prefetch(ring->va + L1_CACHE_BYTES); 3101 #endif 3102 3103 if (!skb) { 3104 ret = hns3_alloc_skb(ring, length, ring->va); 3105 skb = ring->skb; 3106 3107 if (ret < 0) /* failed to alloc the skb */ 3108 return ret; 3109 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need to add frags */ 3110 ret = hns3_add_frag(ring); 3111 if (ret) 3112 return ret; 3113 } 3114 } else { 3115 ret = hns3_add_frag(ring); 3116 if (ret) 3117 return ret; 3118 } 3119 3120 /* As the head data may be changed when GRO is enabled, copy 3121 * the head data in after the rest of the packet has been received 3122 */ 3123 if (skb->len > HNS3_RX_HEAD_SIZE) 3124 memcpy(skb->data, ring->va, 3125 ALIGN(ring->pull_len, sizeof(long))); 3126 3127 ret = hns3_handle_bdinfo(ring, skb); 3128 if (unlikely(ret)) { 3129 dev_kfree_skb_any(skb); 3130 return ret; 3131 } 3132 3133 skb_record_rx_queue(skb, ring->tqp->tqp_index); 3134 return 0; 3135 } 3136 3137 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, 3138 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) 3139 { 3140 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 3141 int unused_count = hns3_desc_unused(ring); 3142 int recv_pkts = 0; 3143 int recv_bds = 0; 3144 int err, num; 3145 3146 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); 3147 num -= unused_count; 3148 unused_count -= ring->pending_buf; 3149 3150 if (num <= 0) 3151 goto out; 3152 3153 rmb(); /* Make sure num has taken effect before other data is touched */ 3154 3155 while (recv_pkts < budget && recv_bds < num) { 3156 /* Reuse or realloc buffers */ 3157 if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { 3158 hns3_nic_alloc_rx_buffers(ring, unused_count); 3159 unused_count = hns3_desc_unused(ring) - 3160 ring->pending_buf; 3161 } 3162 3163 /* Poll one pkt */ 3164 err = hns3_handle_rx_bd(ring); 3165 /* FE (frame end) not received for the packet yet, or failed to alloc skb */ 3166 if (unlikely(!ring->skb || err == -ENXIO)) { 3167 goto out; 3168 } else if (likely(!err)) { 3169 rx_fn(ring, ring->skb); 3170 recv_pkts++; 3171 } 3172 3173 recv_bds += ring->pending_buf; 3174 unused_count += ring->pending_buf; 3175 ring->skb = NULL; 3176 ring->pending_buf = 0; 3177 } 3178 3179 out: 3180 /* Make sure all data has been written before submitting */ 3181 if (unused_count > 0) 3182 hns3_nic_alloc_rx_buffers(ring, unused_count); 3183 3184 return recv_pkts; 3185 } 3186 3187 static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group) 3188 { 3189 #define HNS3_RX_LOW_BYTE_RATE 10000 3190 #define HNS3_RX_MID_BYTE_RATE 20000 3191 #define HNS3_RX_ULTRA_PACKET_RATE 40 3192 3193 enum hns3_flow_level_range new_flow_level; 3194 struct hns3_enet_tqp_vector *tqp_vector; 3195 int packets_per_msecs, bytes_per_msecs; 3196 u32 time_passed_ms; 3197 3198 tqp_vector = ring_group->ring->tqp_vector; 3199 time_passed_ms = 3200 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); 3201 if (!time_passed_ms) 3202 return false; 3203 3204 do_div(ring_group->total_packets, time_passed_ms); 3205 packets_per_msecs = ring_group->total_packets; 3206 3207 do_div(ring_group->total_bytes, time_passed_ms); 3208 bytes_per_msecs = ring_group->total_bytes; 3209 3210 new_flow_level = ring_group->coal.flow_level; 3211 3212 /* Simple throttle rate management 3213 * 0-10MB/s lower (50000 ints/s) 3214 * 10-20MB/s middle (20000 ints/s) 3215 * 20-1249MB/s high (18000 ints/s) 3216 * > 40000pps ultra (8000 ints/s) 3217 */ 3218 switch (new_flow_level) { 3219 case HNS3_FLOW_LOW: 3220 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) 3221
new_flow_level = HNS3_FLOW_MID; 3222 break; 3223 case HNS3_FLOW_MID: 3224 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) 3225 new_flow_level = HNS3_FLOW_HIGH; 3226 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) 3227 new_flow_level = HNS3_FLOW_LOW; 3228 break; 3229 case HNS3_FLOW_HIGH: 3230 case HNS3_FLOW_ULTRA: 3231 default: 3232 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) 3233 new_flow_level = HNS3_FLOW_MID; 3234 break; 3235 } 3236 3237 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && 3238 &tqp_vector->rx_group == ring_group) 3239 new_flow_level = HNS3_FLOW_ULTRA; 3240 3241 ring_group->total_bytes = 0; 3242 ring_group->total_packets = 0; 3243 ring_group->coal.flow_level = new_flow_level; 3244 3245 return true; 3246 } 3247 3248 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) 3249 { 3250 struct hns3_enet_tqp_vector *tqp_vector; 3251 u16 new_int_gl; 3252 3253 if (!ring_group->ring) 3254 return false; 3255 3256 tqp_vector = ring_group->ring->tqp_vector; 3257 if (!tqp_vector->last_jiffies) 3258 return false; 3259 3260 if (ring_group->total_packets == 0) { 3261 ring_group->coal.int_gl = HNS3_INT_GL_50K; 3262 ring_group->coal.flow_level = HNS3_FLOW_LOW; 3263 return true; 3264 } 3265 3266 if (!hns3_get_new_flow_lvl(ring_group)) 3267 return false; 3268 3269 new_int_gl = ring_group->coal.int_gl; 3270 switch (ring_group->coal.flow_level) { 3271 case HNS3_FLOW_LOW: 3272 new_int_gl = HNS3_INT_GL_50K; 3273 break; 3274 case HNS3_FLOW_MID: 3275 new_int_gl = HNS3_INT_GL_20K; 3276 break; 3277 case HNS3_FLOW_HIGH: 3278 new_int_gl = HNS3_INT_GL_18K; 3279 break; 3280 case HNS3_FLOW_ULTRA: 3281 new_int_gl = HNS3_INT_GL_8K; 3282 break; 3283 default: 3284 break; 3285 } 3286 3287 if (new_int_gl != ring_group->coal.int_gl) { 3288 ring_group->coal.int_gl = new_int_gl; 3289 return true; 3290 } 3291 return false; 3292 } 3293 3294 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) 3295 { 3296 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 3297 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 3298 bool rx_update, tx_update; 3299 3300 /* update param every 1000ms */ 3301 if (time_before(jiffies, 3302 tqp_vector->last_jiffies + msecs_to_jiffies(1000))) 3303 return; 3304 3305 if (rx_group->coal.gl_adapt_enable) { 3306 rx_update = hns3_get_new_int_gl(rx_group); 3307 if (rx_update) 3308 hns3_set_vector_coalesce_rx_gl(tqp_vector, 3309 rx_group->coal.int_gl); 3310 } 3311 3312 if (tx_group->coal.gl_adapt_enable) { 3313 tx_update = hns3_get_new_int_gl(tx_group); 3314 if (tx_update) 3315 hns3_set_vector_coalesce_tx_gl(tqp_vector, 3316 tx_group->coal.int_gl); 3317 } 3318 3319 tqp_vector->last_jiffies = jiffies; 3320 } 3321 3322 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 3323 { 3324 struct hns3_nic_priv *priv = netdev_priv(napi->dev); 3325 struct hns3_enet_ring *ring; 3326 int rx_pkt_total = 0; 3327 3328 struct hns3_enet_tqp_vector *tqp_vector = 3329 container_of(napi, struct hns3_enet_tqp_vector, napi); 3330 bool clean_complete = true; 3331 int rx_budget = budget; 3332 3333 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3334 napi_complete(napi); 3335 return 0; 3336 } 3337 3338 /* Since the actual Tx work is minimal, we can give the Tx a larger 3339 * budget and be more aggressive about cleaning up the Tx descriptors. 
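 * Tx rings are cleaned without a budget here; the NAPI budget below is shared across this vector's Rx rings.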
3340 */ 3341 hns3_for_each_ring(ring, tqp_vector->tx_group) 3342 hns3_clean_tx_ring(ring); 3343 3344 /* make sure rx ring budget not smaller than 1 */ 3345 if (tqp_vector->num_tqps > 1) 3346 rx_budget = max(budget / tqp_vector->num_tqps, 1); 3347 3348 hns3_for_each_ring(ring, tqp_vector->rx_group) { 3349 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 3350 hns3_rx_skb); 3351 3352 if (rx_cleaned >= rx_budget) 3353 clean_complete = false; 3354 3355 rx_pkt_total += rx_cleaned; 3356 } 3357 3358 tqp_vector->rx_group.total_packets += rx_pkt_total; 3359 3360 if (!clean_complete) 3361 return budget; 3362 3363 if (napi_complete(napi) && 3364 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3365 hns3_update_new_int_gl(tqp_vector); 3366 hns3_mask_vector_irq(tqp_vector, 1); 3367 } 3368 3369 return rx_pkt_total; 3370 } 3371 3372 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3373 struct hnae3_ring_chain_node *head) 3374 { 3375 struct pci_dev *pdev = tqp_vector->handle->pdev; 3376 struct hnae3_ring_chain_node *cur_chain = head; 3377 struct hnae3_ring_chain_node *chain; 3378 struct hns3_enet_ring *tx_ring; 3379 struct hns3_enet_ring *rx_ring; 3380 3381 tx_ring = tqp_vector->tx_group.ring; 3382 if (tx_ring) { 3383 cur_chain->tqp_index = tx_ring->tqp->tqp_index; 3384 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3385 HNAE3_RING_TYPE_TX); 3386 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3387 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); 3388 3389 cur_chain->next = NULL; 3390 3391 while (tx_ring->next) { 3392 tx_ring = tx_ring->next; 3393 3394 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), 3395 GFP_KERNEL); 3396 if (!chain) 3397 goto err_free_chain; 3398 3399 cur_chain->next = chain; 3400 chain->tqp_index = tx_ring->tqp->tqp_index; 3401 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3402 HNAE3_RING_TYPE_TX); 3403 hnae3_set_field(chain->int_gl_idx, 3404 HNAE3_RING_GL_IDX_M, 3405 HNAE3_RING_GL_IDX_S, 3406 HNAE3_RING_GL_TX); 3407 3408 cur_chain = chain; 3409 } 3410 } 3411 3412 rx_ring = tqp_vector->rx_group.ring; 3413 if (!tx_ring && rx_ring) { 3414 cur_chain->next = NULL; 3415 cur_chain->tqp_index = rx_ring->tqp->tqp_index; 3416 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3417 HNAE3_RING_TYPE_RX); 3418 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3419 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3420 3421 rx_ring = rx_ring->next; 3422 } 3423 3424 while (rx_ring) { 3425 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 3426 if (!chain) 3427 goto err_free_chain; 3428 3429 cur_chain->next = chain; 3430 chain->tqp_index = rx_ring->tqp->tqp_index; 3431 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3432 HNAE3_RING_TYPE_RX); 3433 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3434 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3435 3436 cur_chain = chain; 3437 3438 rx_ring = rx_ring->next; 3439 } 3440 3441 return 0; 3442 3443 err_free_chain: 3444 cur_chain = head->next; 3445 while (cur_chain) { 3446 chain = cur_chain->next; 3447 devm_kfree(&pdev->dev, cur_chain); 3448 cur_chain = chain; 3449 } 3450 head->next = NULL; 3451 3452 return -ENOMEM; 3453 } 3454 3455 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3456 struct hnae3_ring_chain_node *head) 3457 { 3458 struct pci_dev *pdev = tqp_vector->handle->pdev; 3459 struct hnae3_ring_chain_node *chain_tmp, *chain; 3460 3461 chain = head->next; 3462 3463 while (chain) { 3464 chain_tmp = chain->next; 3465 devm_kfree(&pdev->dev, chain); 3466 chain = 
chain_tmp; 3467 } 3468 } 3469 3470 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 3471 struct hns3_enet_ring *ring) 3472 { 3473 ring->next = group->ring; 3474 group->ring = ring; 3475 3476 group->count++; 3477 } 3478 3479 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) 3480 { 3481 struct pci_dev *pdev = priv->ae_handle->pdev; 3482 struct hns3_enet_tqp_vector *tqp_vector; 3483 int num_vectors = priv->vector_num; 3484 int numa_node; 3485 int vector_i; 3486 3487 numa_node = dev_to_node(&pdev->dev); 3488 3489 for (vector_i = 0; vector_i < num_vectors; vector_i++) { 3490 tqp_vector = &priv->tqp_vector[vector_i]; 3491 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), 3492 &tqp_vector->affinity_mask); 3493 } 3494 } 3495 3496 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 3497 { 3498 struct hnae3_ring_chain_node vector_ring_chain; 3499 struct hnae3_handle *h = priv->ae_handle; 3500 struct hns3_enet_tqp_vector *tqp_vector; 3501 int ret = 0; 3502 int i; 3503 3504 hns3_nic_set_cpumask(priv); 3505 3506 for (i = 0; i < priv->vector_num; i++) { 3507 tqp_vector = &priv->tqp_vector[i]; 3508 hns3_vector_gl_rl_init_hw(tqp_vector, priv); 3509 tqp_vector->num_tqps = 0; 3510 } 3511 3512 for (i = 0; i < h->kinfo.num_tqps; i++) { 3513 u16 vector_i = i % priv->vector_num; 3514 u16 tqp_num = h->kinfo.num_tqps; 3515 3516 tqp_vector = &priv->tqp_vector[vector_i]; 3517 3518 hns3_add_ring_to_group(&tqp_vector->tx_group, 3519 &priv->ring[i]); 3520 3521 hns3_add_ring_to_group(&tqp_vector->rx_group, 3522 &priv->ring[i + tqp_num]); 3523 3524 priv->ring[i].tqp_vector = tqp_vector; 3525 priv->ring[i + tqp_num].tqp_vector = tqp_vector; 3526 tqp_vector->num_tqps++; 3527 } 3528 3529 for (i = 0; i < priv->vector_num; i++) { 3530 tqp_vector = &priv->tqp_vector[i]; 3531 3532 tqp_vector->rx_group.total_bytes = 0; 3533 tqp_vector->rx_group.total_packets = 0; 3534 tqp_vector->tx_group.total_bytes = 0; 3535 tqp_vector->tx_group.total_packets = 0; 3536 tqp_vector->handle = h; 3537 3538 ret = hns3_get_vector_ring_chain(tqp_vector, 3539 &vector_ring_chain); 3540 if (ret) 3541 goto map_ring_fail; 3542 3543 ret = h->ae_algo->ops->map_ring_to_vector(h, 3544 tqp_vector->vector_irq, &vector_ring_chain); 3545 3546 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3547 3548 if (ret) 3549 goto map_ring_fail; 3550 3551 netif_napi_add(priv->netdev, &tqp_vector->napi, 3552 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 3553 } 3554 3555 return 0; 3556 3557 map_ring_fail: 3558 while (i--) 3559 netif_napi_del(&priv->tqp_vector[i].napi); 3560 3561 return ret; 3562 } 3563 3564 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 3565 { 3566 #define HNS3_VECTOR_PF_MAX_NUM 64 3567 3568 struct hnae3_handle *h = priv->ae_handle; 3569 struct hns3_enet_tqp_vector *tqp_vector; 3570 struct hnae3_vector_info *vector; 3571 struct pci_dev *pdev = h->pdev; 3572 u16 tqp_num = h->kinfo.num_tqps; 3573 u16 vector_num; 3574 int ret = 0; 3575 u16 i; 3576 3577 /* RSS size, cpu online and vector_num should be the same */ 3578 /* Should consider 2p/4p later */ 3579 vector_num = min_t(u16, num_online_cpus(), tqp_num); 3580 vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM); 3581 3582 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), 3583 GFP_KERNEL); 3584 if (!vector) 3585 return -ENOMEM; 3586 3587 /* save the actual available vector number */ 3588 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 3589 3590 priv->vector_num = vector_num; 3591 
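 /* one tqp_vector per vector actually provided by get_vector(); rings are attached to these vectors later in hns3_nic_init_vector_data() */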
priv->tqp_vector = (struct hns3_enet_tqp_vector *) 3592 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 3593 GFP_KERNEL); 3594 if (!priv->tqp_vector) { 3595 ret = -ENOMEM; 3596 goto out; 3597 } 3598 3599 for (i = 0; i < priv->vector_num; i++) { 3600 tqp_vector = &priv->tqp_vector[i]; 3601 tqp_vector->idx = i; 3602 tqp_vector->mask_addr = vector[i].io_addr; 3603 tqp_vector->vector_irq = vector[i].vector; 3604 hns3_vector_gl_rl_init(tqp_vector, priv); 3605 } 3606 3607 out: 3608 devm_kfree(&pdev->dev, vector); 3609 return ret; 3610 } 3611 3612 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) 3613 { 3614 group->ring = NULL; 3615 group->count = 0; 3616 } 3617 3618 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 3619 { 3620 struct hnae3_ring_chain_node vector_ring_chain; 3621 struct hnae3_handle *h = priv->ae_handle; 3622 struct hns3_enet_tqp_vector *tqp_vector; 3623 int i; 3624 3625 for (i = 0; i < priv->vector_num; i++) { 3626 tqp_vector = &priv->tqp_vector[i]; 3627 3628 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) 3629 continue; 3630 3631 /* Since the mapping can be overwritten, when fail to get the 3632 * chain between vector and ring, we should go on to deal with 3633 * the remaining options. 3634 */ 3635 if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) 3636 dev_warn(priv->dev, "failed to get ring chain\n"); 3637 3638 h->ae_algo->ops->unmap_ring_from_vector(h, 3639 tqp_vector->vector_irq, &vector_ring_chain); 3640 3641 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3642 3643 hns3_clear_ring_group(&tqp_vector->rx_group); 3644 hns3_clear_ring_group(&tqp_vector->tx_group); 3645 netif_napi_del(&priv->tqp_vector[i].napi); 3646 } 3647 } 3648 3649 static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 3650 { 3651 struct hnae3_handle *h = priv->ae_handle; 3652 struct pci_dev *pdev = h->pdev; 3653 int i, ret; 3654 3655 for (i = 0; i < priv->vector_num; i++) { 3656 struct hns3_enet_tqp_vector *tqp_vector; 3657 3658 tqp_vector = &priv->tqp_vector[i]; 3659 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 3660 if (ret) 3661 return; 3662 } 3663 3664 devm_kfree(&pdev->dev, priv->tqp_vector); 3665 } 3666 3667 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 3668 unsigned int ring_type) 3669 { 3670 int queue_num = priv->ae_handle->kinfo.num_tqps; 3671 struct hns3_enet_ring *ring; 3672 int desc_num; 3673 3674 if (ring_type == HNAE3_RING_TYPE_TX) { 3675 ring = &priv->ring[q->tqp_index]; 3676 desc_num = priv->ae_handle->kinfo.num_tx_desc; 3677 ring->queue_index = q->tqp_index; 3678 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; 3679 } else { 3680 ring = &priv->ring[q->tqp_index + queue_num]; 3681 desc_num = priv->ae_handle->kinfo.num_rx_desc; 3682 ring->queue_index = q->tqp_index; 3683 ring->io_base = q->io_base; 3684 } 3685 3686 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 3687 3688 ring->tqp = q; 3689 ring->desc = NULL; 3690 ring->desc_cb = NULL; 3691 ring->dev = priv->dev; 3692 ring->desc_dma_addr = 0; 3693 ring->buf_size = q->buf_size; 3694 ring->desc_num = desc_num; 3695 ring->next_to_use = 0; 3696 ring->next_to_clean = 0; 3697 } 3698 3699 static void hns3_queue_to_ring(struct hnae3_queue *tqp, 3700 struct hns3_nic_priv *priv) 3701 { 3702 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 3703 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 3704 } 3705 3706 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 
3707 { 3708 struct hnae3_handle *h = priv->ae_handle; 3709 struct pci_dev *pdev = h->pdev; 3710 int i; 3711 3712 priv->ring = devm_kzalloc(&pdev->dev, 3713 array3_size(h->kinfo.num_tqps, 3714 sizeof(*priv->ring), 2), 3715 GFP_KERNEL); 3716 if (!priv->ring) 3717 return -ENOMEM; 3718 3719 for (i = 0; i < h->kinfo.num_tqps; i++) 3720 hns3_queue_to_ring(h->kinfo.tqp[i], priv); 3721 3722 return 0; 3723 } 3724 3725 static void hns3_put_ring_config(struct hns3_nic_priv *priv) 3726 { 3727 if (!priv->ring) 3728 return; 3729 3730 devm_kfree(priv->dev, priv->ring); 3731 priv->ring = NULL; 3732 } 3733 3734 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) 3735 { 3736 int ret; 3737 3738 if (ring->desc_num <= 0 || ring->buf_size <= 0) 3739 return -EINVAL; 3740 3741 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, 3742 sizeof(ring->desc_cb[0]), GFP_KERNEL); 3743 if (!ring->desc_cb) { 3744 ret = -ENOMEM; 3745 goto out; 3746 } 3747 3748 ret = hns3_alloc_desc(ring); 3749 if (ret) 3750 goto out_with_desc_cb; 3751 3752 if (!HNAE3_IS_TX_RING(ring)) { 3753 ret = hns3_alloc_ring_buffers(ring); 3754 if (ret) 3755 goto out_with_desc; 3756 } 3757 3758 return 0; 3759 3760 out_with_desc: 3761 hns3_free_desc(ring); 3762 out_with_desc_cb: 3763 devm_kfree(ring_to_dev(ring), ring->desc_cb); 3764 ring->desc_cb = NULL; 3765 out: 3766 return ret; 3767 } 3768 3769 void hns3_fini_ring(struct hns3_enet_ring *ring) 3770 { 3771 hns3_free_desc(ring); 3772 devm_kfree(ring_to_dev(ring), ring->desc_cb); 3773 ring->desc_cb = NULL; 3774 ring->next_to_clean = 0; 3775 ring->next_to_use = 0; 3776 ring->pending_buf = 0; 3777 if (ring->skb) { 3778 dev_kfree_skb_any(ring->skb); 3779 ring->skb = NULL; 3780 } 3781 } 3782 3783 static int hns3_buf_size2type(u32 buf_size) 3784 { 3785 int bd_size_type; 3786 3787 switch (buf_size) { 3788 case 512: 3789 bd_size_type = HNS3_BD_SIZE_512_TYPE; 3790 break; 3791 case 1024: 3792 bd_size_type = HNS3_BD_SIZE_1024_TYPE; 3793 break; 3794 case 2048: 3795 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 3796 break; 3797 case 4096: 3798 bd_size_type = HNS3_BD_SIZE_4096_TYPE; 3799 break; 3800 default: 3801 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 3802 } 3803 3804 return bd_size_type; 3805 } 3806 3807 static void hns3_init_ring_hw(struct hns3_enet_ring *ring) 3808 { 3809 dma_addr_t dma = ring->desc_dma_addr; 3810 struct hnae3_queue *q = ring->tqp; 3811 3812 if (!HNAE3_IS_TX_RING(ring)) { 3813 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma); 3814 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, 3815 (u32)((dma >> 31) >> 1)); 3816 3817 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, 3818 hns3_buf_size2type(ring->buf_size)); 3819 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, 3820 ring->desc_num / 8 - 1); 3821 3822 } else { 3823 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, 3824 (u32)dma); 3825 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 3826 (u32)((dma >> 31) >> 1)); 3827 3828 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 3829 ring->desc_num / 8 - 1); 3830 } 3831 } 3832 3833 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) 3834 { 3835 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 3836 int i; 3837 3838 for (i = 0; i < HNAE3_MAX_TC; i++) { 3839 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; 3840 int j; 3841 3842 if (!tc_info->enable) 3843 continue; 3844 3845 for (j = 0; j < tc_info->tqp_count; j++) { 3846 struct hnae3_queue *q; 3847 3848 q = priv->ring[tc_info->tqp_offset + j].tqp; 3849 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, 
3850 tc_info->tc); 3851 } 3852 } 3853 } 3854 3855 int hns3_init_all_ring(struct hns3_nic_priv *priv) 3856 { 3857 struct hnae3_handle *h = priv->ae_handle; 3858 int ring_num = h->kinfo.num_tqps * 2; 3859 int i, j; 3860 int ret; 3861 3862 for (i = 0; i < ring_num; i++) { 3863 ret = hns3_alloc_ring_memory(&priv->ring[i]); 3864 if (ret) { 3865 dev_err(priv->dev, 3866 "Alloc ring memory fail! ret=%d\n", ret); 3867 goto out_when_alloc_ring_memory; 3868 } 3869 3870 u64_stats_init(&priv->ring[i].syncp); 3871 } 3872 3873 return 0; 3874 3875 out_when_alloc_ring_memory: 3876 for (j = i - 1; j >= 0; j--) 3877 hns3_fini_ring(&priv->ring[j]); 3878 3879 return -ENOMEM; 3880 } 3881 3882 int hns3_uninit_all_ring(struct hns3_nic_priv *priv) 3883 { 3884 struct hnae3_handle *h = priv->ae_handle; 3885 int i; 3886 3887 for (i = 0; i < h->kinfo.num_tqps; i++) { 3888 hns3_fini_ring(&priv->ring[i]); 3889 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); 3890 } 3891 return 0; 3892 } 3893 3894 /* Set mac addr if it is configured. or leave it to the AE driver */ 3895 static int hns3_init_mac_addr(struct net_device *netdev) 3896 { 3897 struct hns3_nic_priv *priv = netdev_priv(netdev); 3898 struct hnae3_handle *h = priv->ae_handle; 3899 u8 mac_addr_temp[ETH_ALEN]; 3900 int ret = 0; 3901 3902 if (h->ae_algo->ops->get_mac_addr) 3903 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 3904 3905 /* Check if the MAC address is valid, if not get a random one */ 3906 if (!is_valid_ether_addr(mac_addr_temp)) { 3907 eth_hw_addr_random(netdev); 3908 dev_warn(priv->dev, "using random MAC address %pM\n", 3909 netdev->dev_addr); 3910 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { 3911 ether_addr_copy(netdev->dev_addr, mac_addr_temp); 3912 ether_addr_copy(netdev->perm_addr, mac_addr_temp); 3913 } else { 3914 return 0; 3915 } 3916 3917 if (h->ae_algo->ops->set_mac_addr) 3918 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 3919 3920 return ret; 3921 } 3922 3923 static int hns3_init_phy(struct net_device *netdev) 3924 { 3925 struct hnae3_handle *h = hns3_get_handle(netdev); 3926 int ret = 0; 3927 3928 if (h->ae_algo->ops->mac_connect_phy) 3929 ret = h->ae_algo->ops->mac_connect_phy(h); 3930 3931 return ret; 3932 } 3933 3934 static void hns3_uninit_phy(struct net_device *netdev) 3935 { 3936 struct hnae3_handle *h = hns3_get_handle(netdev); 3937 3938 if (h->ae_algo->ops->mac_disconnect_phy) 3939 h->ae_algo->ops->mac_disconnect_phy(h); 3940 } 3941 3942 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) 3943 { 3944 struct hnae3_handle *h = hns3_get_handle(netdev); 3945 3946 if (h->ae_algo->ops->del_all_fd_entries) 3947 h->ae_algo->ops->del_all_fd_entries(h, clear_list); 3948 } 3949 3950 static int hns3_client_start(struct hnae3_handle *handle) 3951 { 3952 if (!handle->ae_algo->ops->client_start) 3953 return 0; 3954 3955 return handle->ae_algo->ops->client_start(handle); 3956 } 3957 3958 static void hns3_client_stop(struct hnae3_handle *handle) 3959 { 3960 if (!handle->ae_algo->ops->client_stop) 3961 return; 3962 3963 handle->ae_algo->ops->client_stop(handle); 3964 } 3965 3966 static void hns3_info_show(struct hns3_nic_priv *priv) 3967 { 3968 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 3969 3970 dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); 3971 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); 3972 dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); 3973 dev_info(priv->dev, "Allocated RSS size: %u\n", 
kinfo->req_rss_size); 3974 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); 3975 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); 3976 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); 3977 dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); 3978 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); 3979 } 3980 3981 static int hns3_client_init(struct hnae3_handle *handle) 3982 { 3983 struct pci_dev *pdev = handle->pdev; 3984 u16 alloc_tqps, max_rss_size; 3985 struct hns3_nic_priv *priv; 3986 struct net_device *netdev; 3987 int ret; 3988 3989 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, 3990 &max_rss_size); 3991 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); 3992 if (!netdev) 3993 return -ENOMEM; 3994 3995 priv = netdev_priv(netdev); 3996 priv->dev = &pdev->dev; 3997 priv->netdev = netdev; 3998 priv->ae_handle = handle; 3999 priv->tx_timeout_count = 0; 4000 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 4001 4002 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); 4003 4004 handle->kinfo.netdev = netdev; 4005 handle->priv = (void *)priv; 4006 4007 hns3_init_mac_addr(netdev); 4008 4009 hns3_set_default_feature(netdev); 4010 4011 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; 4012 netdev->priv_flags |= IFF_UNICAST_FLT; 4013 netdev->netdev_ops = &hns3_nic_netdev_ops; 4014 SET_NETDEV_DEV(netdev, &pdev->dev); 4015 hns3_ethtool_set_ops(netdev); 4016 4017 /* Carrier off reporting is important to ethtool even BEFORE open */ 4018 netif_carrier_off(netdev); 4019 4020 ret = hns3_get_ring_config(priv); 4021 if (ret) { 4022 ret = -ENOMEM; 4023 goto out_get_ring_cfg; 4024 } 4025 4026 ret = hns3_nic_alloc_vector_data(priv); 4027 if (ret) { 4028 ret = -ENOMEM; 4029 goto out_alloc_vector_data; 4030 } 4031 4032 ret = hns3_nic_init_vector_data(priv); 4033 if (ret) { 4034 ret = -ENOMEM; 4035 goto out_init_vector_data; 4036 } 4037 4038 ret = hns3_init_all_ring(priv); 4039 if (ret) { 4040 ret = -ENOMEM; 4041 goto out_init_ring; 4042 } 4043 4044 ret = hns3_init_phy(netdev); 4045 if (ret) 4046 goto out_init_phy; 4047 4048 ret = register_netdev(netdev); 4049 if (ret) { 4050 dev_err(priv->dev, "probe register netdev fail!\n"); 4051 goto out_reg_netdev_fail; 4052 } 4053 4054 /* the device can work without cpu rmap, only aRFS needs it */ 4055 ret = hns3_set_rx_cpu_rmap(netdev); 4056 if (ret) 4057 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 4058 4059 ret = hns3_nic_init_irq(priv); 4060 if (ret) { 4061 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); 4062 hns3_free_rx_cpu_rmap(netdev); 4063 goto out_init_irq_fail; 4064 } 4065 4066 ret = hns3_client_start(handle); 4067 if (ret) { 4068 dev_err(priv->dev, "hns3_client_start fail! 
ret=%d\n", ret);
4069 goto out_client_start;
4070 }
4071
4072 hns3_dcbnl_setup(handle);
4073
4074 hns3_dbg_init(handle);
4075
4076 /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
4077 netdev->max_mtu = HNS3_MAX_MTU;
4078
4079 set_bit(HNS3_NIC_STATE_INITED, &priv->state);
4080
4081 if (netif_msg_drv(handle))
4082 hns3_info_show(priv);
4083
4084 return ret;
4085
4086 out_client_start:
4087 hns3_free_rx_cpu_rmap(netdev);
4088 hns3_nic_uninit_irq(priv);
4089 out_init_irq_fail:
4090 unregister_netdev(netdev);
4091 out_reg_netdev_fail:
4092 hns3_uninit_phy(netdev);
4093 out_init_phy:
4094 hns3_uninit_all_ring(priv);
4095 out_init_ring:
4096 hns3_nic_uninit_vector_data(priv);
4097 out_init_vector_data:
4098 hns3_nic_dealloc_vector_data(priv);
4099 out_alloc_vector_data:
4100 priv->ring = NULL;
4101 out_get_ring_cfg:
4102 priv->ae_handle = NULL;
4103 free_netdev(netdev);
4104 return ret;
4105 }
4106
4107 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
4108 {
4109 struct net_device *netdev = handle->kinfo.netdev;
4110 struct hns3_nic_priv *priv = netdev_priv(netdev);
4111 int ret;
4112
4113 if (netdev->reg_state != NETREG_UNINITIALIZED)
4114 unregister_netdev(netdev);
4115
4116 hns3_client_stop(handle);
4117
4118 hns3_uninit_phy(netdev);
4119
4120 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
4121 netdev_warn(netdev, "already uninitialized\n");
4122 goto out_netdev_free;
4123 }
4124
4125 hns3_free_rx_cpu_rmap(netdev);
4126
4127 hns3_nic_uninit_irq(priv);
4128
4129 hns3_del_all_fd_rules(netdev, true);
4130
4131 hns3_clear_all_ring(handle, true);
4132
4133 hns3_nic_uninit_vector_data(priv);
4134
4135 hns3_nic_dealloc_vector_data(priv);
4136
4137 ret = hns3_uninit_all_ring(priv);
4138 if (ret)
4139 netdev_err(netdev, "uninit ring error\n");
4140
4141 hns3_put_ring_config(priv);
4142
4143 out_netdev_free:
4144 hns3_dbg_uninit(handle);
4145 free_netdev(netdev);
4146 }
4147
4148 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
4149 {
4150 struct net_device *netdev = handle->kinfo.netdev;
4151
4152 if (!netdev)
4153 return;
4154
4155 if (linkup) {
4156 netif_carrier_on(netdev);
4157 netif_tx_wake_all_queues(netdev);
4158 if (netif_msg_link(handle))
4159 netdev_info(netdev, "link up\n");
4160 } else {
4161 netif_carrier_off(netdev);
4162 netif_tx_stop_all_queues(netdev);
4163 if (netif_msg_link(handle))
4164 netdev_info(netdev, "link down\n");
4165 }
4166 }
4167
4168 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
4169 {
4170 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4171 struct net_device *ndev = kinfo->netdev;
4172
4173 if (tc > HNAE3_MAX_TC)
4174 return -EINVAL;
4175
4176 if (!ndev)
4177 return -ENODEV;
4178
4179 return hns3_nic_set_real_num_queue(ndev);
4180 }
4181
4182 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
4183 {
4184 while (ring->next_to_clean != ring->next_to_use) {
4185 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
4186 hns3_free_buffer_detach(ring, ring->next_to_clean);
4187 ring_ptr_move_fw(ring, next_to_clean);
4188 }
4189 }
4190
4191 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
4192 {
4193 struct hns3_desc_cb res_cbs;
4194 int ret;
4195
4196 while (ring->next_to_use != ring->next_to_clean) {
4197 /* When a buffer is not reused, its memory has been
4198 * freed in hns3_handle_rx_bd or will be freed by the
4199 * stack, so we need to replace the buffer here.
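 * A replacement buffer is allocated and DMA-mapped by
 * hns3_alloc_and_map_buffer() and installed with hns3_replace_buffer(),
 * so every descriptor still owns a valid buffer after the clear.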
4200 */
4201 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4202 ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
4203 if (ret) {
4204 u64_stats_update_begin(&ring->syncp);
4205 ring->stats.sw_err_cnt++;
4206 u64_stats_update_end(&ring->syncp);
4207 /* if a new buffer cannot be allocated, exit
4208 * directly and let the up flow clear the ring again.
4209 */
4210 netdev_warn(ring_to_netdev(ring),
4211 "reserve buffer map failed, ret = %d\n",
4212 ret);
4213 return ret;
4214 }
4215 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
4216 }
4217 ring_ptr_move_fw(ring, next_to_use);
4218 }
4219
4220 /* Free the pending skb in the rx ring */
4221 if (ring->skb) {
4222 dev_kfree_skb_any(ring->skb);
4223 ring->skb = NULL;
4224 ring->pending_buf = 0;
4225 }
4226
4227 return 0;
4228 }
4229
4230 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
4231 {
4232 while (ring->next_to_use != ring->next_to_clean) {
4233 /* When a buffer is not reused, its memory has been
4234 * freed in hns3_handle_rx_bd or will be freed by the
4235 * stack, so we only need to unmap the buffer here.
4236 */
4237 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4238 hns3_unmap_buffer(ring,
4239 &ring->desc_cb[ring->next_to_use]);
4240 ring->desc_cb[ring->next_to_use].dma = 0;
4241 }
4242
4243 ring_ptr_move_fw(ring, next_to_use);
4244 }
4245 }
4246
4247 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
4248 {
4249 struct net_device *ndev = h->kinfo.netdev;
4250 struct hns3_nic_priv *priv = netdev_priv(ndev);
4251 u32 i;
4252
4253 for (i = 0; i < h->kinfo.num_tqps; i++) {
4254 struct hns3_enet_ring *ring;
4255
4256 ring = &priv->ring[i];
4257 hns3_clear_tx_ring(ring);
4258
4259 ring = &priv->ring[i + h->kinfo.num_tqps];
4260 /* Continue to clear other rings even if clearing some
4261 * rings failed.
4262 */
4263 if (force)
4264 hns3_force_clear_rx_ring(ring);
4265 else
4266 hns3_clear_rx_ring(ring);
4267 }
4268 }
4269
4270 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
4271 {
4272 struct net_device *ndev = h->kinfo.netdev;
4273 struct hns3_nic_priv *priv = netdev_priv(ndev);
4274 struct hns3_enet_ring *rx_ring;
4275 int i, j;
4276 int ret;
4277
4278 for (i = 0; i < h->kinfo.num_tqps; i++) {
4279 ret = h->ae_algo->ops->reset_queue(h, i);
4280 if (ret)
4281 return ret;
4282
4283 hns3_init_ring_hw(&priv->ring[i]);
4284
4285 /* We need to clear the tx ring here because the self test
4286 * uses the ring without bringing the interface down first
4287 */
4288 hns3_clear_tx_ring(&priv->ring[i]);
4289 priv->ring[i].next_to_clean = 0;
4290 priv->ring[i].next_to_use = 0;
4291
4292 rx_ring = &priv->ring[i + h->kinfo.num_tqps];
4293 hns3_init_ring_hw(rx_ring);
4294 ret = hns3_clear_rx_ring(rx_ring);
4295 if (ret)
4296 return ret;
4297
4298 /* We cannot know the hardware head and tail when this
4299 * function is called in the reset flow, so we reuse all desc.
4300 */
4301 for (j = 0; j < rx_ring->desc_num; j++)
4302 hns3_reuse_buffer(rx_ring, j);
4303
4304 rx_ring->next_to_clean = 0;
4305 rx_ring->next_to_use = 0;
4306 }
4307
4308 hns3_init_tx_ring_tc(priv);
4309
4310 return 0;
4311 }
4312
4313 static void hns3_store_coal(struct hns3_nic_priv *priv)
4314 {
4315 /* ethtool only supports setting and querying one coalesce
4316 * configuration for now, so save vector 0's coalesce
4317 * configuration here in order to restore it.
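 * hns3_restore_coal() below writes this saved configuration back to
 * every vector after a reset or a channel-count change.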
4318 */ 4319 memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, 4320 sizeof(struct hns3_enet_coalesce)); 4321 memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal, 4322 sizeof(struct hns3_enet_coalesce)); 4323 } 4324 4325 static void hns3_restore_coal(struct hns3_nic_priv *priv) 4326 { 4327 u16 vector_num = priv->vector_num; 4328 int i; 4329 4330 for (i = 0; i < vector_num; i++) { 4331 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal, 4332 sizeof(struct hns3_enet_coalesce)); 4333 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal, 4334 sizeof(struct hns3_enet_coalesce)); 4335 } 4336 } 4337 4338 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) 4339 { 4340 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 4341 struct net_device *ndev = kinfo->netdev; 4342 struct hns3_nic_priv *priv = netdev_priv(ndev); 4343 4344 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) 4345 return 0; 4346 4347 if (!netif_running(ndev)) 4348 return 0; 4349 4350 return hns3_nic_net_stop(ndev); 4351 } 4352 4353 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) 4354 { 4355 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 4356 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); 4357 int ret = 0; 4358 4359 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 4360 4361 if (netif_running(kinfo->netdev)) { 4362 ret = hns3_nic_net_open(kinfo->netdev); 4363 if (ret) { 4364 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 4365 netdev_err(kinfo->netdev, 4366 "net up fail, ret=%d!\n", ret); 4367 return ret; 4368 } 4369 } 4370 4371 return ret; 4372 } 4373 4374 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) 4375 { 4376 struct net_device *netdev = handle->kinfo.netdev; 4377 struct hns3_nic_priv *priv = netdev_priv(netdev); 4378 int ret; 4379 4380 /* Carrier off reporting is important to ethtool even BEFORE open */ 4381 netif_carrier_off(netdev); 4382 4383 ret = hns3_get_ring_config(priv); 4384 if (ret) 4385 return ret; 4386 4387 ret = hns3_nic_alloc_vector_data(priv); 4388 if (ret) 4389 goto err_put_ring; 4390 4391 hns3_restore_coal(priv); 4392 4393 ret = hns3_nic_init_vector_data(priv); 4394 if (ret) 4395 goto err_dealloc_vector; 4396 4397 ret = hns3_init_all_ring(priv); 4398 if (ret) 4399 goto err_uninit_vector; 4400 4401 /* the device can work without cpu rmap, only aRFS needs it */ 4402 ret = hns3_set_rx_cpu_rmap(netdev); 4403 if (ret) 4404 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 4405 4406 ret = hns3_nic_init_irq(priv); 4407 if (ret) { 4408 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); 4409 hns3_free_rx_cpu_rmap(netdev); 4410 goto err_init_irq_fail; 4411 } 4412 4413 if (!hns3_is_phys_func(handle->pdev)) 4414 hns3_init_mac_addr(netdev); 4415 4416 ret = hns3_client_start(handle); 4417 if (ret) { 4418 dev_err(priv->dev, "hns3_client_start fail! 
ret=%d\n", ret); 4419 goto err_client_start_fail; 4420 } 4421 4422 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 4423 4424 return ret; 4425 4426 err_client_start_fail: 4427 hns3_free_rx_cpu_rmap(netdev); 4428 hns3_nic_uninit_irq(priv); 4429 err_init_irq_fail: 4430 hns3_uninit_all_ring(priv); 4431 err_uninit_vector: 4432 hns3_nic_uninit_vector_data(priv); 4433 err_dealloc_vector: 4434 hns3_nic_dealloc_vector_data(priv); 4435 err_put_ring: 4436 hns3_put_ring_config(priv); 4437 4438 return ret; 4439 } 4440 4441 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) 4442 { 4443 struct net_device *netdev = handle->kinfo.netdev; 4444 struct hns3_nic_priv *priv = netdev_priv(netdev); 4445 int ret; 4446 4447 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 4448 netdev_warn(netdev, "already uninitialized\n"); 4449 return 0; 4450 } 4451 4452 hns3_free_rx_cpu_rmap(netdev); 4453 hns3_nic_uninit_irq(priv); 4454 hns3_clear_all_ring(handle, true); 4455 hns3_reset_tx_queue(priv->ae_handle); 4456 4457 hns3_nic_uninit_vector_data(priv); 4458 4459 hns3_store_coal(priv); 4460 4461 hns3_nic_dealloc_vector_data(priv); 4462 4463 ret = hns3_uninit_all_ring(priv); 4464 if (ret) 4465 netdev_err(netdev, "uninit ring error\n"); 4466 4467 hns3_put_ring_config(priv); 4468 4469 return ret; 4470 } 4471 4472 static int hns3_reset_notify(struct hnae3_handle *handle, 4473 enum hnae3_reset_notify_type type) 4474 { 4475 int ret = 0; 4476 4477 switch (type) { 4478 case HNAE3_UP_CLIENT: 4479 ret = hns3_reset_notify_up_enet(handle); 4480 break; 4481 case HNAE3_DOWN_CLIENT: 4482 ret = hns3_reset_notify_down_enet(handle); 4483 break; 4484 case HNAE3_INIT_CLIENT: 4485 ret = hns3_reset_notify_init_enet(handle); 4486 break; 4487 case HNAE3_UNINIT_CLIENT: 4488 ret = hns3_reset_notify_uninit_enet(handle); 4489 break; 4490 default: 4491 break; 4492 } 4493 4494 return ret; 4495 } 4496 4497 static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num, 4498 bool rxfh_configured) 4499 { 4500 int ret; 4501 4502 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, 4503 rxfh_configured); 4504 if (ret) { 4505 dev_err(&handle->pdev->dev, 4506 "Change tqp num(%u) fail.\n", new_tqp_num); 4507 return ret; 4508 } 4509 4510 ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT); 4511 if (ret) 4512 return ret; 4513 4514 ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT); 4515 if (ret) 4516 hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT); 4517 4518 return ret; 4519 } 4520 4521 int hns3_set_channels(struct net_device *netdev, 4522 struct ethtool_channels *ch) 4523 { 4524 struct hnae3_handle *h = hns3_get_handle(netdev); 4525 struct hnae3_knic_private_info *kinfo = &h->kinfo; 4526 bool rxfh_configured = netif_is_rxfh_configured(netdev); 4527 u32 new_tqp_num = ch->combined_count; 4528 u16 org_tqp_num; 4529 int ret; 4530 4531 if (hns3_nic_resetting(netdev)) 4532 return -EBUSY; 4533 4534 if (ch->rx_count || ch->tx_count) 4535 return -EINVAL; 4536 4537 if (new_tqp_num > hns3_get_max_available_channels(h) || 4538 new_tqp_num < 1) { 4539 dev_err(&netdev->dev, 4540 "Change tqps fail, the tqp range is from 1 to %u", 4541 hns3_get_max_available_channels(h)); 4542 return -EINVAL; 4543 } 4544 4545 if (kinfo->rss_size == new_tqp_num) 4546 return 0; 4547 4548 netif_dbg(h, drv, netdev, 4549 "set channels: tqp_num=%u, rxfh=%d\n", 4550 new_tqp_num, rxfh_configured); 4551 4552 ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); 4553 if (ret) 4554 return ret; 4555 4556 ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); 4557 if 
(ret) 4558 return ret; 4559 4560 org_tqp_num = h->kinfo.num_tqps; 4561 ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); 4562 if (ret) { 4563 int ret1; 4564 4565 netdev_warn(netdev, 4566 "Change channels fail, revert to old value\n"); 4567 ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); 4568 if (ret1) { 4569 netdev_err(netdev, 4570 "revert to old channel fail\n"); 4571 return ret1; 4572 } 4573 4574 return ret; 4575 } 4576 4577 return 0; 4578 } 4579 4580 static const struct hns3_hw_error_info hns3_hw_err[] = { 4581 { .type = HNAE3_PPU_POISON_ERROR, 4582 .msg = "PPU poison" }, 4583 { .type = HNAE3_CMDQ_ECC_ERROR, 4584 .msg = "IMP CMDQ error" }, 4585 { .type = HNAE3_IMP_RD_POISON_ERROR, 4586 .msg = "IMP RD poison" }, 4587 }; 4588 4589 static void hns3_process_hw_error(struct hnae3_handle *handle, 4590 enum hnae3_hw_error_type type) 4591 { 4592 int i; 4593 4594 for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { 4595 if (hns3_hw_err[i].type == type) { 4596 dev_err(&handle->pdev->dev, "Detected %s!\n", 4597 hns3_hw_err[i].msg); 4598 break; 4599 } 4600 } 4601 } 4602 4603 static const struct hnae3_client_ops client_ops = { 4604 .init_instance = hns3_client_init, 4605 .uninit_instance = hns3_client_uninit, 4606 .link_status_change = hns3_link_status_change, 4607 .setup_tc = hns3_client_setup_tc, 4608 .reset_notify = hns3_reset_notify, 4609 .process_hw_error = hns3_process_hw_error, 4610 }; 4611 4612 /* hns3_init_module - Driver registration routine 4613 * hns3_init_module is the first routine called when the driver is 4614 * loaded. All it does is register with the PCI subsystem. 4615 */ 4616 static int __init hns3_init_module(void) 4617 { 4618 int ret; 4619 4620 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); 4621 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); 4622 4623 client.type = HNAE3_CLIENT_KNIC; 4624 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", 4625 hns3_driver_name); 4626 4627 client.ops = &client_ops; 4628 4629 INIT_LIST_HEAD(&client.node); 4630 4631 hns3_dbg_register_debugfs(hns3_driver_name); 4632 4633 ret = hnae3_register_client(&client); 4634 if (ret) 4635 goto err_reg_client; 4636 4637 ret = pci_register_driver(&hns3_driver); 4638 if (ret) 4639 goto err_reg_driver; 4640 4641 return ret; 4642 4643 err_reg_driver: 4644 hnae3_unregister_client(&client); 4645 err_reg_client: 4646 hns3_dbg_unregister_debugfs(); 4647 return ret; 4648 } 4649 module_init(hns3_init_module); 4650 4651 /* hns3_exit_module - Driver exit cleanup routine 4652 * hns3_exit_module is called just before the driver is removed 4653 * from memory. 4654 */ 4655 static void __exit hns3_exit_module(void) 4656 { 4657 pci_unregister_driver(&hns3_driver); 4658 hnae3_unregister_client(&client); 4659 hns3_dbg_unregister_debugfs(); 4660 } 4661 module_exit(hns3_exit_module); 4662 4663 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); 4664 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 4665 MODULE_LICENSE("GPL"); 4666 MODULE_ALIAS("pci:hns-nic"); 4667
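/* Illustrative sketch (not part of the driver): the client registration
 * pattern used by hns3_init_module() above, reduced to its minimum. The
 * names example_client, example_client_ops, example_init_instance,
 * example_uninit_instance, example_init and example_exit are hypothetical
 * and exist only for this sketch; a real client would also fill in the
 * other hnae3_client_ops callbacks it needs.
 *
 *	static int example_init_instance(struct hnae3_handle *handle)
 *	{
 *		return 0;
 *	}
 *
 *	static void example_uninit_instance(struct hnae3_handle *handle,
 *					    bool reset)
 *	{
 *	}
 *
 *	static const struct hnae3_client_ops example_client_ops = {
 *		.init_instance	 = example_init_instance,
 *		.uninit_instance = example_uninit_instance,
 *	};
 *
 *	static struct hnae3_client example_client = {
 *		.type = HNAE3_CLIENT_KNIC,
 *		.ops  = &example_client_ops,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		snprintf(example_client.name, HNAE3_CLIENT_NAME_LENGTH,
 *			 "%s", "example");
 *		INIT_LIST_HEAD(&example_client.node);
 *		return hnae3_register_client(&example_client);
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *		hnae3_unregister_client(&example_client);
 *	}
 *	module_exit(example_exit);
 */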