1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/dma-mapping.h> 5 #include <linux/etherdevice.h> 6 #include <linux/interrupt.h> 7 #ifdef CONFIG_RFS_ACCEL 8 #include <linux/cpu_rmap.h> 9 #endif 10 #include <linux/if_vlan.h> 11 #include <linux/irq.h> 12 #include <linux/ip.h> 13 #include <linux/ipv6.h> 14 #include <linux/module.h> 15 #include <linux/pci.h> 16 #include <linux/aer.h> 17 #include <linux/skbuff.h> 18 #include <linux/sctp.h> 19 #include <net/gre.h> 20 #include <net/ip6_checksum.h> 21 #include <net/pkt_cls.h> 22 #include <net/tcp.h> 23 #include <net/vxlan.h> 24 #include <net/geneve.h> 25 26 #include "hnae3.h" 27 #include "hns3_enet.h" 28 /* All hns3 tracepoints are defined by the include below, which 29 * must be included exactly once across the whole kernel with 30 * CREATE_TRACE_POINTS defined 31 */ 32 #define CREATE_TRACE_POINTS 33 #include "hns3_trace.h" 34 35 #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) 36 #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) 37 38 #define hns3_rl_err(fmt, ...) \ 39 do { \ 40 if (net_ratelimit()) \ 41 netdev_err(fmt, ##__VA_ARGS__); \ 42 } while (0) 43 44 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); 45 46 static const char hns3_driver_name[] = "hns3"; 47 static const char hns3_driver_string[] = 48 "Hisilicon Ethernet Network Driver for Hip08 Family"; 49 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; 50 static struct hnae3_client client; 51 52 static int debug = -1; 53 module_param(debug, int, 0); 54 MODULE_PARM_DESC(debug, " Network interface message level setting"); 55 56 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 57 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) 58 59 #define HNS3_INNER_VLAN_TAG 1 60 #define HNS3_OUTER_VLAN_TAG 2 61 62 #define HNS3_MIN_TX_LEN 33U 63 64 /* hns3_pci_tbl - PCI Device ID Table 65 * 66 * Last entry must be all 0s 67 * 68 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 69 * Class, Class Mask, private data (not used) } 70 */ 71 static const struct pci_device_id hns3_pci_tbl[] = { 72 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 73 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 74 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 75 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 76 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 77 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 79 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 81 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 83 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 86 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 87 /* required last entry */ 88 {0, } 89 }; 90 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); 91 92 static irqreturn_t hns3_irq_handle(int irq, void *vector) 93 { 94 struct hns3_enet_tqp_vector *tqp_vector = vector; 95 96 napi_schedule_irqoff(&tqp_vector->napi); 97 98 return IRQ_HANDLED; 99 } 100 101 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) 102 { 103 struct hns3_enet_tqp_vector *tqp_vectors; 104 unsigned int i; 105 106 for (i = 0; i < priv->vector_num; i++) { 107 tqp_vectors = &priv->tqp_vector[i]; 108 109 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) 110 continue; 111 112 /* clear the affinity mask */ 113 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); 114 115 /* release the irq resource */ 116 free_irq(tqp_vectors->vector_irq, tqp_vectors); 117 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; 118 } 119 } 120 121 static int hns3_nic_init_irq(struct hns3_nic_priv *priv) 122 { 123 struct hns3_enet_tqp_vector *tqp_vectors; 124 int txrx_int_idx = 0; 125 int rx_int_idx = 0; 126 int tx_int_idx = 0; 127 unsigned int i; 128 int ret; 129 130 for (i = 0; i < priv->vector_num; i++) { 131 tqp_vectors = &priv->tqp_vector[i]; 132 133 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) 134 continue; 135 136 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { 137 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, 138 "%s-%s-%s-%d", hns3_driver_name, 139 pci_name(priv->ae_handle->pdev), 140 "TxRx", txrx_int_idx++); 141 txrx_int_idx++; 142 } else if (tqp_vectors->rx_group.ring) { 143 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, 144 "%s-%s-%s-%d", hns3_driver_name, 145 pci_name(priv->ae_handle->pdev), 146 "Rx", rx_int_idx++); 147 } else if (tqp_vectors->tx_group.ring) { 148 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, 149 "%s-%s-%s-%d", hns3_driver_name, 150 pci_name(priv->ae_handle->pdev), 151 "Tx", tx_int_idx++); 152 } else { 153 /* Skip this unused q_vector */ 154 continue; 155 } 156 157 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; 158 159 irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); 160 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, 161 tqp_vectors->name, tqp_vectors); 162 if (ret) { 163 netdev_err(priv->netdev, "request irq(%d) fail\n", 164 tqp_vectors->vector_irq); 165 hns3_nic_uninit_irq(priv); 166 return ret; 167 } 168 169 irq_set_affinity_hint(tqp_vectors->vector_irq, 170 &tqp_vectors->affinity_mask); 171 172 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; 173 } 174 175 return 0; 176 } 177 178 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, 179 u32 mask_en) 180 { 181 writel(mask_en, tqp_vector->mask_addr); 182 } 183 184 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) 185 { 186 napi_enable(&tqp_vector->napi); 187 enable_irq(tqp_vector->vector_irq); 188 189 /* enable vector */ 190 hns3_mask_vector_irq(tqp_vector, 1); 191 } 192 193 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) 194 { 195 /* disable vector */ 196 hns3_mask_vector_irq(tqp_vector, 0); 197 198 disable_irq(tqp_vector->vector_irq); 199 napi_disable(&tqp_vector->napi); 200 } 201 202 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, 203 u32 rl_value) 204 { 205 u32 rl_reg = hns3_rl_usec_to_reg(rl_value); 206 207 /* this defines the configuration for RL (Interrupt Rate Limiter). 208 * Rl defines rate of interrupts i.e. number of interrupts-per-second 209 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing 210 */ 211 212 if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable && 213 !tqp_vector->rx_group.coal.gl_adapt_enable) 214 /* According to the hardware, the range of rl_reg is 215 * 0-59 and the unit is 4. 216 */ 217 rl_reg |= HNS3_INT_RL_ENABLE_MASK; 218 219 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); 220 } 221 222 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, 223 u32 gl_value) 224 { 225 u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value); 226 227 writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); 228 } 229 230 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, 231 u32 gl_value) 232 { 233 u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value); 234 235 writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); 236 } 237 238 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, 239 struct hns3_nic_priv *priv) 240 { 241 /* initialize the configuration for interrupt coalescing. 242 * 1. GL (Interrupt Gap Limiter) 243 * 2. RL (Interrupt Rate Limiter) 244 * 245 * Default: enable interrupt coalescing self-adaptive and GL 246 */ 247 tqp_vector->tx_group.coal.gl_adapt_enable = 1; 248 tqp_vector->rx_group.coal.gl_adapt_enable = 1; 249 250 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; 251 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; 252 253 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; 254 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; 255 } 256 257 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector, 258 struct hns3_nic_priv *priv) 259 { 260 struct hnae3_handle *h = priv->ae_handle; 261 262 hns3_set_vector_coalesce_tx_gl(tqp_vector, 263 tqp_vector->tx_group.coal.int_gl); 264 hns3_set_vector_coalesce_rx_gl(tqp_vector, 265 tqp_vector->rx_group.coal.int_gl); 266 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); 267 } 268 269 static int hns3_nic_set_real_num_queue(struct net_device *netdev) 270 { 271 struct hnae3_handle *h = hns3_get_handle(netdev); 272 struct hnae3_knic_private_info *kinfo = &h->kinfo; 273 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; 274 int i, ret; 275 276 if (kinfo->num_tc <= 1) { 277 netdev_reset_tc(netdev); 278 } else { 279 ret = netdev_set_num_tc(netdev, kinfo->num_tc); 280 if (ret) { 281 netdev_err(netdev, 282 "netdev_set_num_tc fail, ret=%d!\n", ret); 283 return ret; 284 } 285 286 for (i = 0; i < HNAE3_MAX_TC; i++) { 287 if (!kinfo->tc_info[i].enable) 288 continue; 289 290 netdev_set_tc_queue(netdev, 291 kinfo->tc_info[i].tc, 292 kinfo->tc_info[i].tqp_count, 293 kinfo->tc_info[i].tqp_offset); 294 } 295 } 296 297 ret = netif_set_real_num_tx_queues(netdev, queue_size); 298 if (ret) { 299 netdev_err(netdev, 300 "netif_set_real_num_tx_queues fail, ret=%d!\n", ret); 301 return ret; 302 } 303 304 ret = netif_set_real_num_rx_queues(netdev, queue_size); 305 if (ret) { 306 netdev_err(netdev, 307 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); 308 return ret; 309 } 310 311 return 0; 312 } 313 314 static u16 hns3_get_max_available_channels(struct hnae3_handle *h) 315 { 316 u16 alloc_tqps, max_rss_size, rss_size; 317 318 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); 319 rss_size = alloc_tqps / h->kinfo.num_tc; 320 321 return min_t(u16, rss_size, max_rss_size); 322 } 323 324 static void hns3_tqp_enable(struct hnae3_queue *tqp) 325 { 326 u32 rcb_reg; 327 328 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 329 rcb_reg |= BIT(HNS3_RING_EN_B); 330 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 331 } 332 333 static void hns3_tqp_disable(struct hnae3_queue *tqp) 334 { 335 u32 rcb_reg; 336 337 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 338 rcb_reg &= ~BIT(HNS3_RING_EN_B); 339 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 340 } 341 342 static void hns3_free_rx_cpu_rmap(struct net_device *netdev) 343 { 344 #ifdef CONFIG_RFS_ACCEL 345 free_irq_cpu_rmap(netdev->rx_cpu_rmap); 346 netdev->rx_cpu_rmap = NULL; 347 #endif 348 } 349 350 static int hns3_set_rx_cpu_rmap(struct net_device *netdev) 351 { 352 #ifdef CONFIG_RFS_ACCEL 353 struct hns3_nic_priv *priv = netdev_priv(netdev); 354 struct hns3_enet_tqp_vector *tqp_vector; 355 int i, ret; 356 357 if (!netdev->rx_cpu_rmap) { 358 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); 359 if (!netdev->rx_cpu_rmap) 360 return -ENOMEM; 361 } 362 363 for (i = 0; i < priv->vector_num; i++) { 364 tqp_vector = &priv->tqp_vector[i]; 365 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, 366 tqp_vector->vector_irq); 367 if (ret) { 368 hns3_free_rx_cpu_rmap(netdev); 369 return ret; 370 } 371 } 372 #endif 373 return 0; 374 } 375 376 static int hns3_nic_net_up(struct net_device *netdev) 377 { 378 struct hns3_nic_priv *priv = netdev_priv(netdev); 379 struct hnae3_handle *h = priv->ae_handle; 380 int i, j; 381 int ret; 382 383 ret = hns3_nic_reset_all_ring(h); 384 if (ret) 385 return ret; 386 387 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 388 389 /* enable the vectors */ 390 for (i = 0; i < priv->vector_num; i++) 391 hns3_vector_enable(&priv->tqp_vector[i]); 392 393 /* enable rcb */ 394 for (j = 0; j < h->kinfo.num_tqps; j++) 395 hns3_tqp_enable(h->kinfo.tqp[j]); 396 397 /* start the ae_dev */ 398 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; 399 if (ret) { 400 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 401 while (j--) 402 hns3_tqp_disable(h->kinfo.tqp[j]); 403 404 for (j = i - 1; j >= 0; j--) 405 hns3_vector_disable(&priv->tqp_vector[j]); 406 } 407 408 return ret; 409 } 410 411 static void hns3_config_xps(struct hns3_nic_priv *priv) 412 { 413 int i; 414 415 for (i = 0; i < priv->vector_num; i++) { 416 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; 417 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; 418 419 while (ring) { 420 int ret; 421 422 ret = netif_set_xps_queue(priv->netdev, 423 &tqp_vector->affinity_mask, 424 ring->tqp->tqp_index); 425 if (ret) 426 netdev_warn(priv->netdev, 427 "set xps queue failed: %d", ret); 428 429 ring = ring->next; 430 } 431 } 432 } 433 434 static int hns3_nic_net_open(struct net_device *netdev) 435 { 436 struct hns3_nic_priv *priv = netdev_priv(netdev); 437 struct hnae3_handle *h = hns3_get_handle(netdev); 438 struct hnae3_knic_private_info *kinfo; 439 int i, ret; 440 441 if (hns3_nic_resetting(netdev)) 442 return -EBUSY; 443 444 netif_carrier_off(netdev); 445 446 ret = hns3_nic_set_real_num_queue(netdev); 447 if (ret) 448 return ret; 449 450 ret = hns3_nic_net_up(netdev); 451 if (ret) { 452 netdev_err(netdev, "net up fail, ret=%d!\n", ret); 453 return ret; 454 } 455 456 kinfo = &h->kinfo; 457 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 458 netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]); 459 460 if (h->ae_algo->ops->set_timer_task) 461 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); 462 463 hns3_config_xps(priv); 464 465 netif_dbg(h, drv, netdev, "net open\n"); 466 467 return 0; 468 } 469 470 static void hns3_reset_tx_queue(struct hnae3_handle *h) 471 { 472 struct net_device *ndev = h->kinfo.netdev; 473 struct hns3_nic_priv *priv = netdev_priv(ndev); 474 struct netdev_queue *dev_queue; 475 u32 i; 476 477 for (i = 0; i < h->kinfo.num_tqps; i++) { 478 dev_queue = netdev_get_tx_queue(ndev, 479 priv->ring[i].queue_index); 480 netdev_tx_reset_queue(dev_queue); 481 } 482 } 483 484 static void hns3_nic_net_down(struct net_device *netdev) 485 { 486 struct hns3_nic_priv *priv = netdev_priv(netdev); 487 struct hnae3_handle *h = hns3_get_handle(netdev); 488 const struct hnae3_ae_ops *ops; 489 int i; 490 491 /* disable vectors */ 492 for (i = 0; i < priv->vector_num; i++) 493 hns3_vector_disable(&priv->tqp_vector[i]); 494 495 /* disable rcb */ 496 for (i = 0; i < h->kinfo.num_tqps; i++) 497 hns3_tqp_disable(h->kinfo.tqp[i]); 498 499 /* stop ae_dev */ 500 ops = priv->ae_handle->ae_algo->ops; 501 if (ops->stop) 502 ops->stop(priv->ae_handle); 503 504 /* delay ring buffer clearing to hns3_reset_notify_uninit_enet 505 * during reset process, because driver may not be able 506 * to disable the ring through firmware when downing the netdev. 507 */ 508 if (!hns3_nic_resetting(netdev)) 509 hns3_clear_all_ring(priv->ae_handle, false); 510 511 hns3_reset_tx_queue(priv->ae_handle); 512 } 513 514 static int hns3_nic_net_stop(struct net_device *netdev) 515 { 516 struct hns3_nic_priv *priv = netdev_priv(netdev); 517 struct hnae3_handle *h = hns3_get_handle(netdev); 518 519 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 520 return 0; 521 522 netif_dbg(h, drv, netdev, "net stop\n"); 523 524 if (h->ae_algo->ops->set_timer_task) 525 h->ae_algo->ops->set_timer_task(priv->ae_handle, false); 526 527 netif_tx_stop_all_queues(netdev); 528 netif_carrier_off(netdev); 529 530 hns3_nic_net_down(netdev); 531 532 return 0; 533 } 534 535 static int hns3_nic_uc_sync(struct net_device *netdev, 536 const unsigned char *addr) 537 { 538 struct hnae3_handle *h = hns3_get_handle(netdev); 539 540 if (h->ae_algo->ops->add_uc_addr) 541 return h->ae_algo->ops->add_uc_addr(h, addr); 542 543 return 0; 544 } 545 546 static int hns3_nic_uc_unsync(struct net_device *netdev, 547 const unsigned char *addr) 548 { 549 struct hnae3_handle *h = hns3_get_handle(netdev); 550 551 /* need ignore the request of removing device address, because 552 * we store the device address and other addresses of uc list 553 * in the function's mac filter list. 554 */ 555 if (ether_addr_equal(addr, netdev->dev_addr)) 556 return 0; 557 558 if (h->ae_algo->ops->rm_uc_addr) 559 return h->ae_algo->ops->rm_uc_addr(h, addr); 560 561 return 0; 562 } 563 564 static int hns3_nic_mc_sync(struct net_device *netdev, 565 const unsigned char *addr) 566 { 567 struct hnae3_handle *h = hns3_get_handle(netdev); 568 569 if (h->ae_algo->ops->add_mc_addr) 570 return h->ae_algo->ops->add_mc_addr(h, addr); 571 572 return 0; 573 } 574 575 static int hns3_nic_mc_unsync(struct net_device *netdev, 576 const unsigned char *addr) 577 { 578 struct hnae3_handle *h = hns3_get_handle(netdev); 579 580 if (h->ae_algo->ops->rm_mc_addr) 581 return h->ae_algo->ops->rm_mc_addr(h, addr); 582 583 return 0; 584 } 585 586 static u8 hns3_get_netdev_flags(struct net_device *netdev) 587 { 588 u8 flags = 0; 589 590 if (netdev->flags & IFF_PROMISC) { 591 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; 592 } else { 593 flags |= HNAE3_VLAN_FLTR; 594 if (netdev->flags & IFF_ALLMULTI) 595 flags |= HNAE3_USER_MPE; 596 } 597 598 return flags; 599 } 600 601 static void hns3_nic_set_rx_mode(struct net_device *netdev) 602 { 603 struct hnae3_handle *h = hns3_get_handle(netdev); 604 u8 new_flags; 605 606 new_flags = hns3_get_netdev_flags(netdev); 607 608 __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); 609 __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync); 610 611 /* User mode Promisc mode enable and vlan filtering is disabled to 612 * let all packets in. 613 */ 614 h->netdev_flags = new_flags; 615 hns3_request_update_promisc_mode(h); 616 } 617 618 void hns3_request_update_promisc_mode(struct hnae3_handle *handle) 619 { 620 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; 621 622 if (ops->request_update_promisc_mode) 623 ops->request_update_promisc_mode(handle); 624 } 625 626 int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) 627 { 628 struct hns3_nic_priv *priv = netdev_priv(netdev); 629 struct hnae3_handle *h = priv->ae_handle; 630 631 if (h->ae_algo->ops->set_promisc_mode) { 632 return h->ae_algo->ops->set_promisc_mode(h, 633 promisc_flags & HNAE3_UPE, 634 promisc_flags & HNAE3_MPE); 635 } 636 637 return 0; 638 } 639 640 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) 641 { 642 struct hns3_nic_priv *priv = netdev_priv(netdev); 643 struct hnae3_handle *h = priv->ae_handle; 644 bool last_state; 645 646 if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) { 647 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false; 648 if (enable != last_state) { 649 netdev_info(netdev, 650 "%s vlan filter\n", 651 enable ? "enable" : "disable"); 652 h->ae_algo->ops->enable_vlan_filter(h, enable); 653 } 654 } 655 } 656 657 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, 658 u16 *mss, u32 *type_cs_vlan_tso) 659 { 660 u32 l4_offset, hdr_len; 661 union l3_hdr_info l3; 662 union l4_hdr_info l4; 663 u32 l4_paylen; 664 int ret; 665 666 if (!skb_is_gso(skb)) 667 return 0; 668 669 ret = skb_cow_head(skb, 0); 670 if (unlikely(ret < 0)) 671 return ret; 672 673 l3.hdr = skb_network_header(skb); 674 l4.hdr = skb_transport_header(skb); 675 676 /* Software should clear the IPv4's checksum field when tso is 677 * needed. 678 */ 679 if (l3.v4->version == 4) 680 l3.v4->check = 0; 681 682 /* tunnel packet */ 683 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 684 SKB_GSO_GRE_CSUM | 685 SKB_GSO_UDP_TUNNEL | 686 SKB_GSO_UDP_TUNNEL_CSUM)) { 687 if ((!(skb_shinfo(skb)->gso_type & 688 SKB_GSO_PARTIAL)) && 689 (skb_shinfo(skb)->gso_type & 690 SKB_GSO_UDP_TUNNEL_CSUM)) { 691 /* Software should clear the udp's checksum 692 * field when tso is needed. 693 */ 694 l4.udp->check = 0; 695 } 696 /* reset l3&l4 pointers from outer to inner headers */ 697 l3.hdr = skb_inner_network_header(skb); 698 l4.hdr = skb_inner_transport_header(skb); 699 700 /* Software should clear the IPv4's checksum field when 701 * tso is needed. 702 */ 703 if (l3.v4->version == 4) 704 l3.v4->check = 0; 705 } 706 707 /* normal or tunnel packet */ 708 l4_offset = l4.hdr - skb->data; 709 hdr_len = (l4.tcp->doff << 2) + l4_offset; 710 711 /* remove payload length from inner pseudo checksum when tso */ 712 l4_paylen = skb->len - l4_offset; 713 csum_replace_by_diff(&l4.tcp->check, 714 (__force __wsum)htonl(l4_paylen)); 715 716 /* find the txbd field values */ 717 *paylen = skb->len - hdr_len; 718 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); 719 720 /* get MSS for TSO */ 721 *mss = skb_shinfo(skb)->gso_size; 722 723 trace_hns3_tso(skb); 724 725 return 0; 726 } 727 728 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, 729 u8 *il4_proto) 730 { 731 union l3_hdr_info l3; 732 unsigned char *l4_hdr; 733 unsigned char *exthdr; 734 u8 l4_proto_tmp; 735 __be16 frag_off; 736 737 /* find outer header point */ 738 l3.hdr = skb_network_header(skb); 739 l4_hdr = skb_transport_header(skb); 740 741 if (skb->protocol == htons(ETH_P_IPV6)) { 742 exthdr = l3.hdr + sizeof(*l3.v6); 743 l4_proto_tmp = l3.v6->nexthdr; 744 if (l4_hdr != exthdr) 745 ipv6_skip_exthdr(skb, exthdr - skb->data, 746 &l4_proto_tmp, &frag_off); 747 } else if (skb->protocol == htons(ETH_P_IP)) { 748 l4_proto_tmp = l3.v4->protocol; 749 } else { 750 return -EINVAL; 751 } 752 753 *ol4_proto = l4_proto_tmp; 754 755 /* tunnel packet */ 756 if (!skb->encapsulation) { 757 *il4_proto = 0; 758 return 0; 759 } 760 761 /* find inner header point */ 762 l3.hdr = skb_inner_network_header(skb); 763 l4_hdr = skb_inner_transport_header(skb); 764 765 if (l3.v6->version == 6) { 766 exthdr = l3.hdr + sizeof(*l3.v6); 767 l4_proto_tmp = l3.v6->nexthdr; 768 if (l4_hdr != exthdr) 769 ipv6_skip_exthdr(skb, exthdr - skb->data, 770 &l4_proto_tmp, &frag_off); 771 } else if (l3.v4->version == 4) { 772 l4_proto_tmp = l3.v4->protocol; 773 } 774 775 *il4_proto = l4_proto_tmp; 776 777 return 0; 778 } 779 780 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL 781 * and it is udp packet, which has a dest port as the IANA assigned. 782 * the hardware is expected to do the checksum offload, but the 783 * hardware will not do the checksum offload when udp dest port is 784 * 4789 or 6081. 785 */ 786 static bool hns3_tunnel_csum_bug(struct sk_buff *skb) 787 { 788 union l4_hdr_info l4; 789 790 l4.hdr = skb_transport_header(skb); 791 792 if (!(!skb->encapsulation && 793 (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || 794 l4.udp->dest == htons(GENEVE_UDP_PORT)))) 795 return false; 796 797 skb_checksum_help(skb); 798 799 return true; 800 } 801 802 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 803 u32 *ol_type_vlan_len_msec) 804 { 805 u32 l2_len, l3_len, l4_len; 806 unsigned char *il2_hdr; 807 union l3_hdr_info l3; 808 union l4_hdr_info l4; 809 810 l3.hdr = skb_network_header(skb); 811 l4.hdr = skb_transport_header(skb); 812 813 /* compute OL2 header size, defined in 2 Bytes */ 814 l2_len = l3.hdr - skb->data; 815 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); 816 817 /* compute OL3 header size, defined in 4 Bytes */ 818 l3_len = l4.hdr - l3.hdr; 819 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); 820 821 il2_hdr = skb_inner_mac_header(skb); 822 /* compute OL4 header size, defined in 4 Bytes */ 823 l4_len = il2_hdr - l4.hdr; 824 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); 825 826 /* define outer network header type */ 827 if (skb->protocol == htons(ETH_P_IP)) { 828 if (skb_is_gso(skb)) 829 hns3_set_field(*ol_type_vlan_len_msec, 830 HNS3_TXD_OL3T_S, 831 HNS3_OL3T_IPV4_CSUM); 832 else 833 hns3_set_field(*ol_type_vlan_len_msec, 834 HNS3_TXD_OL3T_S, 835 HNS3_OL3T_IPV4_NO_CSUM); 836 837 } else if (skb->protocol == htons(ETH_P_IPV6)) { 838 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, 839 HNS3_OL3T_IPV6); 840 } 841 842 if (ol4_proto == IPPROTO_UDP) 843 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 844 HNS3_TUN_MAC_IN_UDP); 845 else if (ol4_proto == IPPROTO_GRE) 846 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 847 HNS3_TUN_NVGRE); 848 } 849 850 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 851 u8 il4_proto, u32 *type_cs_vlan_tso, 852 u32 *ol_type_vlan_len_msec) 853 { 854 unsigned char *l2_hdr = skb->data; 855 u32 l4_proto = ol4_proto; 856 union l4_hdr_info l4; 857 union l3_hdr_info l3; 858 u32 l2_len, l3_len; 859 860 l4.hdr = skb_transport_header(skb); 861 l3.hdr = skb_network_header(skb); 862 863 /* handle encapsulation skb */ 864 if (skb->encapsulation) { 865 /* If this is a not UDP/GRE encapsulation skb */ 866 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { 867 /* drop the skb tunnel packet if hardware don't support, 868 * because hardware can't calculate csum when TSO. 869 */ 870 if (skb_is_gso(skb)) 871 return -EDOM; 872 873 /* the stack computes the IP header already, 874 * driver calculate l4 checksum when not TSO. 875 */ 876 skb_checksum_help(skb); 877 return 0; 878 } 879 880 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); 881 882 /* switch to inner header */ 883 l2_hdr = skb_inner_mac_header(skb); 884 l3.hdr = skb_inner_network_header(skb); 885 l4.hdr = skb_inner_transport_header(skb); 886 l4_proto = il4_proto; 887 } 888 889 if (l3.v4->version == 4) { 890 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 891 HNS3_L3T_IPV4); 892 893 /* the stack computes the IP header already, the only time we 894 * need the hardware to recompute it is in the case of TSO. 895 */ 896 if (skb_is_gso(skb)) 897 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); 898 } else if (l3.v6->version == 6) { 899 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 900 HNS3_L3T_IPV6); 901 } 902 903 /* compute inner(/normal) L2 header size, defined in 2 Bytes */ 904 l2_len = l3.hdr - l2_hdr; 905 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); 906 907 /* compute inner(/normal) L3 header size, defined in 4 Bytes */ 908 l3_len = l4.hdr - l3.hdr; 909 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); 910 911 /* compute inner(/normal) L4 header size, defined in 4 Bytes */ 912 switch (l4_proto) { 913 case IPPROTO_TCP: 914 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 915 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 916 HNS3_L4T_TCP); 917 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 918 l4.tcp->doff); 919 break; 920 case IPPROTO_UDP: 921 if (hns3_tunnel_csum_bug(skb)) 922 break; 923 924 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 925 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 926 HNS3_L4T_UDP); 927 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 928 (sizeof(struct udphdr) >> 2)); 929 break; 930 case IPPROTO_SCTP: 931 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 932 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 933 HNS3_L4T_SCTP); 934 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 935 (sizeof(struct sctphdr) >> 2)); 936 break; 937 default: 938 /* drop the skb tunnel packet if hardware don't support, 939 * because hardware can't calculate csum when TSO. 940 */ 941 if (skb_is_gso(skb)) 942 return -EDOM; 943 944 /* the stack computes the IP header already, 945 * driver calculate l4 checksum when not TSO. 946 */ 947 skb_checksum_help(skb); 948 return 0; 949 } 950 951 return 0; 952 } 953 954 static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, 955 struct sk_buff *skb) 956 { 957 struct hnae3_handle *handle = tx_ring->tqp->handle; 958 struct vlan_ethhdr *vhdr; 959 int rc; 960 961 if (!(skb->protocol == htons(ETH_P_8021Q) || 962 skb_vlan_tag_present(skb))) 963 return 0; 964 965 /* Since HW limitation, if port based insert VLAN enabled, only one VLAN 966 * header is allowed in skb, otherwise it will cause RAS error. 967 */ 968 if (unlikely(skb_vlan_tagged_multi(skb) && 969 handle->port_base_vlan_state == 970 HNAE3_PORT_BASE_VLAN_ENABLE)) 971 return -EINVAL; 972 973 if (skb->protocol == htons(ETH_P_8021Q) && 974 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 975 /* When HW VLAN acceleration is turned off, and the stack 976 * sets the protocol to 802.1q, the driver just need to 977 * set the protocol to the encapsulated ethertype. 978 */ 979 skb->protocol = vlan_get_protocol(skb); 980 return 0; 981 } 982 983 if (skb_vlan_tag_present(skb)) { 984 /* Based on hw strategy, use out_vtag in two layer tag case, 985 * and use inner_vtag in one tag case. 986 */ 987 if (skb->protocol == htons(ETH_P_8021Q) && 988 handle->port_base_vlan_state == 989 HNAE3_PORT_BASE_VLAN_DISABLE) 990 rc = HNS3_OUTER_VLAN_TAG; 991 else 992 rc = HNS3_INNER_VLAN_TAG; 993 994 skb->protocol = vlan_get_protocol(skb); 995 return rc; 996 } 997 998 rc = skb_cow_head(skb, 0); 999 if (unlikely(rc < 0)) 1000 return rc; 1001 1002 vhdr = (struct vlan_ethhdr *)skb->data; 1003 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) 1004 & VLAN_PRIO_MASK); 1005 1006 skb->protocol = vlan_get_protocol(skb); 1007 return 0; 1008 } 1009 1010 static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, 1011 struct sk_buff *skb, struct hns3_desc *desc) 1012 { 1013 u32 ol_type_vlan_len_msec = 0; 1014 u32 type_cs_vlan_tso = 0; 1015 u32 paylen = skb->len; 1016 u16 inner_vtag = 0; 1017 u16 out_vtag = 0; 1018 u16 mss = 0; 1019 int ret; 1020 1021 ret = hns3_handle_vtags(ring, skb); 1022 if (unlikely(ret < 0)) { 1023 u64_stats_update_begin(&ring->syncp); 1024 ring->stats.tx_vlan_err++; 1025 u64_stats_update_end(&ring->syncp); 1026 return ret; 1027 } else if (ret == HNS3_INNER_VLAN_TAG) { 1028 inner_vtag = skb_vlan_tag_get(skb); 1029 inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1030 VLAN_PRIO_MASK; 1031 hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); 1032 } else if (ret == HNS3_OUTER_VLAN_TAG) { 1033 out_vtag = skb_vlan_tag_get(skb); 1034 out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1035 VLAN_PRIO_MASK; 1036 hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, 1037 1); 1038 } 1039 1040 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1041 u8 ol4_proto, il4_proto; 1042 1043 skb_reset_mac_len(skb); 1044 1045 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); 1046 if (unlikely(ret < 0)) { 1047 u64_stats_update_begin(&ring->syncp); 1048 ring->stats.tx_l4_proto_err++; 1049 u64_stats_update_end(&ring->syncp); 1050 return ret; 1051 } 1052 1053 ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, 1054 &type_cs_vlan_tso, 1055 &ol_type_vlan_len_msec); 1056 if (unlikely(ret < 0)) { 1057 u64_stats_update_begin(&ring->syncp); 1058 ring->stats.tx_l2l3l4_err++; 1059 u64_stats_update_end(&ring->syncp); 1060 return ret; 1061 } 1062 1063 ret = hns3_set_tso(skb, &paylen, &mss, 1064 &type_cs_vlan_tso); 1065 if (unlikely(ret < 0)) { 1066 u64_stats_update_begin(&ring->syncp); 1067 ring->stats.tx_tso_err++; 1068 u64_stats_update_end(&ring->syncp); 1069 return ret; 1070 } 1071 } 1072 1073 /* Set txbd */ 1074 desc->tx.ol_type_vlan_len_msec = 1075 cpu_to_le32(ol_type_vlan_len_msec); 1076 desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); 1077 desc->tx.paylen = cpu_to_le32(paylen); 1078 desc->tx.mss = cpu_to_le16(mss); 1079 desc->tx.vlan_tag = cpu_to_le16(inner_vtag); 1080 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); 1081 1082 return 0; 1083 } 1084 1085 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, 1086 unsigned int size, enum hns_desc_type type) 1087 { 1088 #define HNS3_LIKELY_BD_NUM 1 1089 1090 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 1091 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1092 struct device *dev = ring_to_dev(ring); 1093 skb_frag_t *frag; 1094 unsigned int frag_buf_num; 1095 int k, sizeoflast; 1096 dma_addr_t dma; 1097 1098 if (type == DESC_TYPE_FRAGLIST_SKB || 1099 type == DESC_TYPE_SKB) { 1100 struct sk_buff *skb = (struct sk_buff *)priv; 1101 1102 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); 1103 } else { 1104 frag = (skb_frag_t *)priv; 1105 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); 1106 } 1107 1108 if (unlikely(dma_mapping_error(dev, dma))) { 1109 u64_stats_update_begin(&ring->syncp); 1110 ring->stats.sw_err_cnt++; 1111 u64_stats_update_end(&ring->syncp); 1112 return -ENOMEM; 1113 } 1114 1115 desc_cb->priv = priv; 1116 desc_cb->length = size; 1117 desc_cb->dma = dma; 1118 desc_cb->type = type; 1119 1120 if (likely(size <= HNS3_MAX_BD_SIZE)) { 1121 desc->addr = cpu_to_le64(dma); 1122 desc->tx.send_size = cpu_to_le16(size); 1123 desc->tx.bdtp_fe_sc_vld_ra_ri = 1124 cpu_to_le16(BIT(HNS3_TXD_VLD_B)); 1125 1126 trace_hns3_tx_desc(ring, ring->next_to_use); 1127 ring_ptr_move_fw(ring, next_to_use); 1128 return HNS3_LIKELY_BD_NUM; 1129 } 1130 1131 frag_buf_num = hns3_tx_bd_count(size); 1132 sizeoflast = size % HNS3_MAX_BD_SIZE; 1133 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 1134 1135 /* When frag size is bigger than hardware limit, split this frag */ 1136 for (k = 0; k < frag_buf_num; k++) { 1137 /* now, fill the descriptor */ 1138 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); 1139 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? 1140 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); 1141 desc->tx.bdtp_fe_sc_vld_ra_ri = 1142 cpu_to_le16(BIT(HNS3_TXD_VLD_B)); 1143 1144 trace_hns3_tx_desc(ring, ring->next_to_use); 1145 /* move ring pointer to next */ 1146 ring_ptr_move_fw(ring, next_to_use); 1147 1148 desc = &ring->desc[ring->next_to_use]; 1149 } 1150 1151 return frag_buf_num; 1152 } 1153 1154 static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, 1155 unsigned int bd_num) 1156 { 1157 unsigned int size; 1158 int i; 1159 1160 size = skb_headlen(skb); 1161 while (size > HNS3_MAX_BD_SIZE) { 1162 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1163 size -= HNS3_MAX_BD_SIZE; 1164 1165 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1166 return bd_num; 1167 } 1168 1169 if (size) { 1170 bd_size[bd_num++] = size; 1171 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1172 return bd_num; 1173 } 1174 1175 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1176 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1177 size = skb_frag_size(frag); 1178 if (!size) 1179 continue; 1180 1181 while (size > HNS3_MAX_BD_SIZE) { 1182 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1183 size -= HNS3_MAX_BD_SIZE; 1184 1185 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1186 return bd_num; 1187 } 1188 1189 bd_size[bd_num++] = size; 1190 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1191 return bd_num; 1192 } 1193 1194 return bd_num; 1195 } 1196 1197 static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size) 1198 { 1199 struct sk_buff *frag_skb; 1200 unsigned int bd_num = 0; 1201 1202 /* If the total len is within the max bd limit */ 1203 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) && 1204 skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM)) 1205 return skb_shinfo(skb)->nr_frags + 1U; 1206 1207 /* The below case will always be linearized, return 1208 * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized. 1209 */ 1210 if (unlikely(skb->len > HNS3_MAX_TSO_SIZE || 1211 (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE))) 1212 return HNS3_MAX_TSO_BD_NUM + 1U; 1213 1214 bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); 1215 1216 if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) 1217 return bd_num; 1218 1219 skb_walk_frags(skb, frag_skb) { 1220 bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num); 1221 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1222 return bd_num; 1223 } 1224 1225 return bd_num; 1226 } 1227 1228 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) 1229 { 1230 if (!skb->encapsulation) 1231 return skb_transport_offset(skb) + tcp_hdrlen(skb); 1232 1233 return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); 1234 } 1235 1236 /* HW need every continuous 8 buffer data to be larger than MSS, 1237 * we simplify it by ensuring skb_headlen + the first continuous 1238 * 7 frags to to be larger than gso header len + mss, and the remaining 1239 * continuous 7 frags to be larger than MSS except the last 7 frags. 1240 */ 1241 static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, 1242 unsigned int bd_num) 1243 { 1244 unsigned int tot_len = 0; 1245 int i; 1246 1247 for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++) 1248 tot_len += bd_size[i]; 1249 1250 /* ensure the first 8 frags is greater than mss + header */ 1251 if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] < 1252 skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) 1253 return true; 1254 1255 /* ensure every continuous 7 buffer is greater than mss 1256 * except the last one. 1257 */ 1258 for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) { 1259 tot_len -= bd_size[i]; 1260 tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U]; 1261 1262 if (tot_len < skb_shinfo(skb)->gso_size) 1263 return true; 1264 } 1265 1266 return false; 1267 } 1268 1269 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) 1270 { 1271 int i = 0; 1272 1273 for (i = 0; i < MAX_SKB_FRAGS; i++) 1274 size[i] = skb_frag_size(&shinfo->frags[i]); 1275 } 1276 1277 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, 1278 struct net_device *netdev, 1279 struct sk_buff *skb) 1280 { 1281 struct hns3_nic_priv *priv = netdev_priv(netdev); 1282 unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; 1283 unsigned int bd_num; 1284 1285 bd_num = hns3_tx_bd_num(skb, bd_size); 1286 if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { 1287 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && 1288 !hns3_skb_need_linearized(skb, bd_size, bd_num)) { 1289 trace_hns3_over_8bd(skb); 1290 goto out; 1291 } 1292 1293 if (__skb_linearize(skb)) 1294 return -ENOMEM; 1295 1296 bd_num = hns3_tx_bd_count(skb->len); 1297 if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || 1298 (!skb_is_gso(skb) && 1299 bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { 1300 trace_hns3_over_8bd(skb); 1301 return -ENOMEM; 1302 } 1303 1304 u64_stats_update_begin(&ring->syncp); 1305 ring->stats.tx_copy++; 1306 u64_stats_update_end(&ring->syncp); 1307 } 1308 1309 out: 1310 if (likely(ring_space(ring) >= bd_num)) 1311 return bd_num; 1312 1313 netif_stop_subqueue(netdev, ring->queue_index); 1314 smp_mb(); /* Memory barrier before checking ring_space */ 1315 1316 /* Start queue in case hns3_clean_tx_ring has just made room 1317 * available and has not seen the queue stopped state performed 1318 * by netif_stop_subqueue above. 1319 */ 1320 if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && 1321 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 1322 netif_start_subqueue(netdev, ring->queue_index); 1323 return bd_num; 1324 } 1325 1326 return -EBUSY; 1327 } 1328 1329 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) 1330 { 1331 struct device *dev = ring_to_dev(ring); 1332 unsigned int i; 1333 1334 for (i = 0; i < ring->desc_num; i++) { 1335 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1336 1337 memset(desc, 0, sizeof(*desc)); 1338 1339 /* check if this is where we started */ 1340 if (ring->next_to_use == next_to_use_orig) 1341 break; 1342 1343 /* rollback one */ 1344 ring_ptr_move_bw(ring, next_to_use); 1345 1346 if (!ring->desc_cb[ring->next_to_use].dma) 1347 continue; 1348 1349 /* unmap the descriptor dma address */ 1350 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB || 1351 ring->desc_cb[ring->next_to_use].type == 1352 DESC_TYPE_FRAGLIST_SKB) 1353 dma_unmap_single(dev, 1354 ring->desc_cb[ring->next_to_use].dma, 1355 ring->desc_cb[ring->next_to_use].length, 1356 DMA_TO_DEVICE); 1357 else if (ring->desc_cb[ring->next_to_use].length) 1358 dma_unmap_page(dev, 1359 ring->desc_cb[ring->next_to_use].dma, 1360 ring->desc_cb[ring->next_to_use].length, 1361 DMA_TO_DEVICE); 1362 1363 ring->desc_cb[ring->next_to_use].length = 0; 1364 ring->desc_cb[ring->next_to_use].dma = 0; 1365 ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; 1366 } 1367 } 1368 1369 static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, 1370 struct sk_buff *skb, enum hns_desc_type type) 1371 { 1372 unsigned int size = skb_headlen(skb); 1373 int i, ret, bd_num = 0; 1374 1375 if (size) { 1376 ret = hns3_fill_desc(ring, skb, size, type); 1377 if (unlikely(ret < 0)) 1378 return ret; 1379 1380 bd_num += ret; 1381 } 1382 1383 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1384 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1385 1386 size = skb_frag_size(frag); 1387 if (!size) 1388 continue; 1389 1390 ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE); 1391 if (unlikely(ret < 0)) 1392 return ret; 1393 1394 bd_num += ret; 1395 } 1396 1397 return bd_num; 1398 } 1399 1400 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 1401 { 1402 struct hns3_nic_priv *priv = netdev_priv(netdev); 1403 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; 1404 struct netdev_queue *dev_queue; 1405 int pre_ntu, next_to_use_head; 1406 struct sk_buff *frag_skb; 1407 int bd_num = 0; 1408 int ret; 1409 1410 /* Hardware can only handle short frames above 32 bytes */ 1411 if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) 1412 return NETDEV_TX_OK; 1413 1414 /* Prefetch the data used later */ 1415 prefetch(skb->data); 1416 1417 ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); 1418 if (unlikely(ret <= 0)) { 1419 if (ret == -EBUSY) { 1420 u64_stats_update_begin(&ring->syncp); 1421 ring->stats.tx_busy++; 1422 u64_stats_update_end(&ring->syncp); 1423 return NETDEV_TX_BUSY; 1424 } else if (ret == -ENOMEM) { 1425 u64_stats_update_begin(&ring->syncp); 1426 ring->stats.sw_err_cnt++; 1427 u64_stats_update_end(&ring->syncp); 1428 } 1429 1430 hns3_rl_err(netdev, "xmit error: %d!\n", ret); 1431 goto out_err_tx_ok; 1432 } 1433 1434 next_to_use_head = ring->next_to_use; 1435 1436 ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]); 1437 if (unlikely(ret < 0)) 1438 goto fill_err; 1439 1440 ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); 1441 if (unlikely(ret < 0)) 1442 goto fill_err; 1443 1444 bd_num += ret; 1445 1446 skb_walk_frags(skb, frag_skb) { 1447 ret = hns3_fill_skb_to_desc(ring, frag_skb, 1448 DESC_TYPE_FRAGLIST_SKB); 1449 if (unlikely(ret < 0)) 1450 goto fill_err; 1451 1452 bd_num += ret; 1453 } 1454 1455 pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : 1456 (ring->desc_num - 1); 1457 ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= 1458 cpu_to_le16(BIT(HNS3_TXD_FE_B)); 1459 trace_hns3_tx_desc(ring, pre_ntu); 1460 1461 /* Complete translate all packets */ 1462 dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); 1463 netdev_tx_sent_queue(dev_queue, skb->len); 1464 1465 wmb(); /* Commit all data before submit */ 1466 1467 hnae3_queue_xmit(ring->tqp, bd_num); 1468 1469 return NETDEV_TX_OK; 1470 1471 fill_err: 1472 hns3_clear_desc(ring, next_to_use_head); 1473 1474 out_err_tx_ok: 1475 dev_kfree_skb_any(skb); 1476 return NETDEV_TX_OK; 1477 } 1478 1479 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) 1480 { 1481 struct hnae3_handle *h = hns3_get_handle(netdev); 1482 struct sockaddr *mac_addr = p; 1483 int ret; 1484 1485 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) 1486 return -EADDRNOTAVAIL; 1487 1488 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { 1489 netdev_info(netdev, "already using mac address %pM\n", 1490 mac_addr->sa_data); 1491 return 0; 1492 } 1493 1494 /* For VF device, if there is a perm_addr, then the user will not 1495 * be allowed to change the address. 1496 */ 1497 if (!hns3_is_phys_func(h->pdev) && 1498 !is_zero_ether_addr(netdev->perm_addr)) { 1499 netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n", 1500 netdev->perm_addr, mac_addr->sa_data); 1501 return -EPERM; 1502 } 1503 1504 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); 1505 if (ret) { 1506 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); 1507 return ret; 1508 } 1509 1510 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); 1511 1512 return 0; 1513 } 1514 1515 static int hns3_nic_do_ioctl(struct net_device *netdev, 1516 struct ifreq *ifr, int cmd) 1517 { 1518 struct hnae3_handle *h = hns3_get_handle(netdev); 1519 1520 if (!netif_running(netdev)) 1521 return -EINVAL; 1522 1523 if (!h->ae_algo->ops->do_ioctl) 1524 return -EOPNOTSUPP; 1525 1526 return h->ae_algo->ops->do_ioctl(h, ifr, cmd); 1527 } 1528 1529 static int hns3_nic_set_features(struct net_device *netdev, 1530 netdev_features_t features) 1531 { 1532 netdev_features_t changed = netdev->features ^ features; 1533 struct hns3_nic_priv *priv = netdev_priv(netdev); 1534 struct hnae3_handle *h = priv->ae_handle; 1535 bool enable; 1536 int ret; 1537 1538 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { 1539 enable = !!(features & NETIF_F_GRO_HW); 1540 ret = h->ae_algo->ops->set_gro_en(h, enable); 1541 if (ret) 1542 return ret; 1543 } 1544 1545 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && 1546 h->ae_algo->ops->enable_hw_strip_rxvtag) { 1547 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); 1548 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); 1549 if (ret) 1550 return ret; 1551 } 1552 1553 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { 1554 enable = !!(features & NETIF_F_NTUPLE); 1555 h->ae_algo->ops->enable_fd(h, enable); 1556 } 1557 1558 netdev->features = features; 1559 return 0; 1560 } 1561 1562 static netdev_features_t hns3_features_check(struct sk_buff *skb, 1563 struct net_device *dev, 1564 netdev_features_t features) 1565 { 1566 #define HNS3_MAX_HDR_LEN 480U 1567 #define HNS3_MAX_L4_HDR_LEN 60U 1568 1569 size_t len; 1570 1571 if (skb->ip_summed != CHECKSUM_PARTIAL) 1572 return features; 1573 1574 if (skb->encapsulation) 1575 len = skb_inner_transport_header(skb) - skb->data; 1576 else 1577 len = skb_transport_header(skb) - skb->data; 1578 1579 /* Assume L4 is 60 byte as TCP is the only protocol with a 1580 * a flexible value, and it's max len is 60 bytes. 1581 */ 1582 len += HNS3_MAX_L4_HDR_LEN; 1583 1584 /* Hardware only supports checksum on the skb with a max header 1585 * len of 480 bytes. 1586 */ 1587 if (len > HNS3_MAX_HDR_LEN) 1588 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 1589 1590 return features; 1591 } 1592 1593 static void hns3_nic_get_stats64(struct net_device *netdev, 1594 struct rtnl_link_stats64 *stats) 1595 { 1596 struct hns3_nic_priv *priv = netdev_priv(netdev); 1597 int queue_num = priv->ae_handle->kinfo.num_tqps; 1598 struct hnae3_handle *handle = priv->ae_handle; 1599 struct hns3_enet_ring *ring; 1600 u64 rx_length_errors = 0; 1601 u64 rx_crc_errors = 0; 1602 u64 rx_multicast = 0; 1603 unsigned int start; 1604 u64 tx_errors = 0; 1605 u64 rx_errors = 0; 1606 unsigned int idx; 1607 u64 tx_bytes = 0; 1608 u64 rx_bytes = 0; 1609 u64 tx_pkts = 0; 1610 u64 rx_pkts = 0; 1611 u64 tx_drop = 0; 1612 u64 rx_drop = 0; 1613 1614 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 1615 return; 1616 1617 handle->ae_algo->ops->update_stats(handle, &netdev->stats); 1618 1619 for (idx = 0; idx < queue_num; idx++) { 1620 /* fetch the tx stats */ 1621 ring = &priv->ring[idx]; 1622 do { 1623 start = u64_stats_fetch_begin_irq(&ring->syncp); 1624 tx_bytes += ring->stats.tx_bytes; 1625 tx_pkts += ring->stats.tx_pkts; 1626 tx_drop += ring->stats.sw_err_cnt; 1627 tx_drop += ring->stats.tx_vlan_err; 1628 tx_drop += ring->stats.tx_l4_proto_err; 1629 tx_drop += ring->stats.tx_l2l3l4_err; 1630 tx_drop += ring->stats.tx_tso_err; 1631 tx_errors += ring->stats.sw_err_cnt; 1632 tx_errors += ring->stats.tx_vlan_err; 1633 tx_errors += ring->stats.tx_l4_proto_err; 1634 tx_errors += ring->stats.tx_l2l3l4_err; 1635 tx_errors += ring->stats.tx_tso_err; 1636 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1637 1638 /* fetch the rx stats */ 1639 ring = &priv->ring[idx + queue_num]; 1640 do { 1641 start = u64_stats_fetch_begin_irq(&ring->syncp); 1642 rx_bytes += ring->stats.rx_bytes; 1643 rx_pkts += ring->stats.rx_pkts; 1644 rx_drop += ring->stats.l2_err; 1645 rx_errors += ring->stats.l2_err; 1646 rx_errors += ring->stats.l3l4_csum_err; 1647 rx_crc_errors += ring->stats.l2_err; 1648 rx_multicast += ring->stats.rx_multicast; 1649 rx_length_errors += ring->stats.err_pkt_len; 1650 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1651 } 1652 1653 stats->tx_bytes = tx_bytes; 1654 stats->tx_packets = tx_pkts; 1655 stats->rx_bytes = rx_bytes; 1656 stats->rx_packets = rx_pkts; 1657 1658 stats->rx_errors = rx_errors; 1659 stats->multicast = rx_multicast; 1660 stats->rx_length_errors = rx_length_errors; 1661 stats->rx_crc_errors = rx_crc_errors; 1662 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 1663 1664 stats->tx_errors = tx_errors; 1665 stats->rx_dropped = rx_drop; 1666 stats->tx_dropped = tx_drop; 1667 stats->collisions = netdev->stats.collisions; 1668 stats->rx_over_errors = netdev->stats.rx_over_errors; 1669 stats->rx_frame_errors = netdev->stats.rx_frame_errors; 1670 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; 1671 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; 1672 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; 1673 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; 1674 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; 1675 stats->tx_window_errors = netdev->stats.tx_window_errors; 1676 stats->rx_compressed = netdev->stats.rx_compressed; 1677 stats->tx_compressed = netdev->stats.tx_compressed; 1678 } 1679 1680 static int hns3_setup_tc(struct net_device *netdev, void *type_data) 1681 { 1682 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 1683 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; 1684 struct hnae3_knic_private_info *kinfo; 1685 u8 tc = mqprio_qopt->qopt.num_tc; 1686 u16 mode = mqprio_qopt->mode; 1687 u8 hw = mqprio_qopt->qopt.hw; 1688 struct hnae3_handle *h; 1689 1690 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && 1691 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) 1692 return -EOPNOTSUPP; 1693 1694 if (tc > HNAE3_MAX_TC) 1695 return -EINVAL; 1696 1697 if (!netdev) 1698 return -EINVAL; 1699 1700 h = hns3_get_handle(netdev); 1701 kinfo = &h->kinfo; 1702 1703 netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); 1704 1705 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 1706 kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP; 1707 } 1708 1709 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 1710 void *type_data) 1711 { 1712 if (type != TC_SETUP_QDISC_MQPRIO) 1713 return -EOPNOTSUPP; 1714 1715 return hns3_setup_tc(dev, type_data); 1716 } 1717 1718 static int hns3_vlan_rx_add_vid(struct net_device *netdev, 1719 __be16 proto, u16 vid) 1720 { 1721 struct hnae3_handle *h = hns3_get_handle(netdev); 1722 int ret = -EIO; 1723 1724 if (h->ae_algo->ops->set_vlan_filter) 1725 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); 1726 1727 return ret; 1728 } 1729 1730 static int hns3_vlan_rx_kill_vid(struct net_device *netdev, 1731 __be16 proto, u16 vid) 1732 { 1733 struct hnae3_handle *h = hns3_get_handle(netdev); 1734 int ret = -EIO; 1735 1736 if (h->ae_algo->ops->set_vlan_filter) 1737 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); 1738 1739 return ret; 1740 } 1741 1742 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 1743 u8 qos, __be16 vlan_proto) 1744 { 1745 struct hnae3_handle *h = hns3_get_handle(netdev); 1746 int ret = -EIO; 1747 1748 netif_dbg(h, drv, netdev, 1749 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n", 1750 vf, vlan, qos, ntohs(vlan_proto)); 1751 1752 if (h->ae_algo->ops->set_vf_vlan_filter) 1753 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, 1754 qos, vlan_proto); 1755 1756 return ret; 1757 } 1758 1759 static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) 1760 { 1761 struct hnae3_handle *handle = hns3_get_handle(netdev); 1762 1763 if (hns3_nic_resetting(netdev)) 1764 return -EBUSY; 1765 1766 if (!handle->ae_algo->ops->set_vf_spoofchk) 1767 return -EOPNOTSUPP; 1768 1769 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); 1770 } 1771 1772 static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) 1773 { 1774 struct hnae3_handle *handle = hns3_get_handle(netdev); 1775 1776 if (!handle->ae_algo->ops->set_vf_trust) 1777 return -EOPNOTSUPP; 1778 1779 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); 1780 } 1781 1782 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) 1783 { 1784 struct hnae3_handle *h = hns3_get_handle(netdev); 1785 int ret; 1786 1787 if (hns3_nic_resetting(netdev)) 1788 return -EBUSY; 1789 1790 if (!h->ae_algo->ops->set_mtu) 1791 return -EOPNOTSUPP; 1792 1793 netif_dbg(h, drv, netdev, 1794 "change mtu from %u to %d\n", netdev->mtu, new_mtu); 1795 1796 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 1797 if (ret) 1798 netdev_err(netdev, "failed to change MTU in hardware %d\n", 1799 ret); 1800 else 1801 netdev->mtu = new_mtu; 1802 1803 return ret; 1804 } 1805 1806 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) 1807 { 1808 struct hns3_nic_priv *priv = netdev_priv(ndev); 1809 struct hnae3_handle *h = hns3_get_handle(ndev); 1810 struct hns3_enet_ring *tx_ring; 1811 struct napi_struct *napi; 1812 int timeout_queue = 0; 1813 int hw_head, hw_tail; 1814 int fbd_num, fbd_oft; 1815 int ebd_num, ebd_oft; 1816 int bd_num, bd_err; 1817 int ring_en, tc; 1818 int i; 1819 1820 /* Find the stopped queue the same way the stack does */ 1821 for (i = 0; i < ndev->num_tx_queues; i++) { 1822 struct netdev_queue *q; 1823 unsigned long trans_start; 1824 1825 q = netdev_get_tx_queue(ndev, i); 1826 trans_start = q->trans_start; 1827 if (netif_xmit_stopped(q) && 1828 time_after(jiffies, 1829 (trans_start + ndev->watchdog_timeo))) { 1830 timeout_queue = i; 1831 netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", 1832 q->state, 1833 jiffies_to_msecs(jiffies - trans_start)); 1834 break; 1835 } 1836 } 1837 1838 if (i == ndev->num_tx_queues) { 1839 netdev_info(ndev, 1840 "no netdev TX timeout queue found, timeout count: %llu\n", 1841 priv->tx_timeout_count); 1842 return false; 1843 } 1844 1845 priv->tx_timeout_count++; 1846 1847 tx_ring = &priv->ring[timeout_queue]; 1848 napi = &tx_ring->tqp_vector->napi; 1849 1850 netdev_info(ndev, 1851 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", 1852 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, 1853 tx_ring->next_to_clean, napi->state); 1854 1855 netdev_info(ndev, 1856 "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n", 1857 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, 1858 tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt); 1859 1860 netdev_info(ndev, 1861 "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n", 1862 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt, 1863 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); 1864 1865 /* When mac received many pause frames continuous, it's unable to send 1866 * packets, which may cause tx timeout 1867 */ 1868 if (h->ae_algo->ops->get_mac_stats) { 1869 struct hns3_mac_stats mac_stats; 1870 1871 h->ae_algo->ops->get_mac_stats(h, &mac_stats); 1872 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", 1873 mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); 1874 } 1875 1876 hw_head = readl_relaxed(tx_ring->tqp->io_base + 1877 HNS3_RING_TX_RING_HEAD_REG); 1878 hw_tail = readl_relaxed(tx_ring->tqp->io_base + 1879 HNS3_RING_TX_RING_TAIL_REG); 1880 fbd_num = readl_relaxed(tx_ring->tqp->io_base + 1881 HNS3_RING_TX_RING_FBDNUM_REG); 1882 fbd_oft = readl_relaxed(tx_ring->tqp->io_base + 1883 HNS3_RING_TX_RING_OFFSET_REG); 1884 ebd_num = readl_relaxed(tx_ring->tqp->io_base + 1885 HNS3_RING_TX_RING_EBDNUM_REG); 1886 ebd_oft = readl_relaxed(tx_ring->tqp->io_base + 1887 HNS3_RING_TX_RING_EBD_OFFSET_REG); 1888 bd_num = readl_relaxed(tx_ring->tqp->io_base + 1889 HNS3_RING_TX_RING_BD_NUM_REG); 1890 bd_err = readl_relaxed(tx_ring->tqp->io_base + 1891 HNS3_RING_TX_RING_BD_ERR_REG); 1892 ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); 1893 tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); 1894 1895 netdev_info(ndev, 1896 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", 1897 bd_num, hw_head, hw_tail, bd_err, 1898 readl(tx_ring->tqp_vector->mask_addr)); 1899 netdev_info(ndev, 1900 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", 1901 ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); 1902 1903 return true; 1904 } 1905 1906 static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue) 1907 { 1908 struct hns3_nic_priv *priv = netdev_priv(ndev); 1909 struct hnae3_handle *h = priv->ae_handle; 1910 1911 if (!hns3_get_tx_timeo_queue_info(ndev)) 1912 return; 1913 1914 /* request the reset, and let the hclge to determine 1915 * which reset level should be done 1916 */ 1917 if (h->ae_algo->ops->reset_event) 1918 h->ae_algo->ops->reset_event(h->pdev, h); 1919 } 1920 1921 #ifdef CONFIG_RFS_ACCEL 1922 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 1923 u16 rxq_index, u32 flow_id) 1924 { 1925 struct hnae3_handle *h = hns3_get_handle(dev); 1926 struct flow_keys fkeys; 1927 1928 if (!h->ae_algo->ops->add_arfs_entry) 1929 return -EOPNOTSUPP; 1930 1931 if (skb->encapsulation) 1932 return -EPROTONOSUPPORT; 1933 1934 if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) 1935 return -EPROTONOSUPPORT; 1936 1937 if ((fkeys.basic.n_proto != htons(ETH_P_IP) && 1938 fkeys.basic.n_proto != htons(ETH_P_IPV6)) || 1939 (fkeys.basic.ip_proto != IPPROTO_TCP && 1940 fkeys.basic.ip_proto != IPPROTO_UDP)) 1941 return -EPROTONOSUPPORT; 1942 1943 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); 1944 } 1945 #endif 1946 1947 static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, 1948 struct ifla_vf_info *ivf) 1949 { 1950 struct hnae3_handle *h = hns3_get_handle(ndev); 1951 1952 if (!h->ae_algo->ops->get_vf_config) 1953 return -EOPNOTSUPP; 1954 1955 return h->ae_algo->ops->get_vf_config(h, vf, ivf); 1956 } 1957 1958 static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, 1959 int link_state) 1960 { 1961 struct hnae3_handle *h = hns3_get_handle(ndev); 1962 1963 if (!h->ae_algo->ops->set_vf_link_state) 1964 return -EOPNOTSUPP; 1965 1966 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); 1967 } 1968 1969 static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, 1970 int min_tx_rate, int max_tx_rate) 1971 { 1972 struct hnae3_handle *h = hns3_get_handle(ndev); 1973 1974 if (!h->ae_algo->ops->set_vf_rate) 1975 return -EOPNOTSUPP; 1976 1977 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, 1978 false); 1979 } 1980 1981 static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 1982 { 1983 struct hnae3_handle *h = hns3_get_handle(netdev); 1984 1985 if (!h->ae_algo->ops->set_vf_mac) 1986 return -EOPNOTSUPP; 1987 1988 if (is_multicast_ether_addr(mac)) { 1989 netdev_err(netdev, 1990 "Invalid MAC:%pM specified. Could not set MAC\n", 1991 mac); 1992 return -EINVAL; 1993 } 1994 1995 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); 1996 } 1997 1998 static const struct net_device_ops hns3_nic_netdev_ops = { 1999 .ndo_open = hns3_nic_net_open, 2000 .ndo_stop = hns3_nic_net_stop, 2001 .ndo_start_xmit = hns3_nic_net_xmit, 2002 .ndo_tx_timeout = hns3_nic_net_timeout, 2003 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 2004 .ndo_do_ioctl = hns3_nic_do_ioctl, 2005 .ndo_change_mtu = hns3_nic_change_mtu, 2006 .ndo_set_features = hns3_nic_set_features, 2007 .ndo_features_check = hns3_features_check, 2008 .ndo_get_stats64 = hns3_nic_get_stats64, 2009 .ndo_setup_tc = hns3_nic_setup_tc, 2010 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 2011 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 2012 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 2013 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 2014 .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, 2015 .ndo_set_vf_trust = hns3_set_vf_trust, 2016 #ifdef CONFIG_RFS_ACCEL 2017 .ndo_rx_flow_steer = hns3_rx_flow_steer, 2018 #endif 2019 .ndo_get_vf_config = hns3_nic_get_vf_config, 2020 .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, 2021 .ndo_set_vf_rate = hns3_nic_set_vf_rate, 2022 .ndo_set_vf_mac = hns3_nic_set_vf_mac, 2023 }; 2024 2025 bool hns3_is_phys_func(struct pci_dev *pdev) 2026 { 2027 u32 dev_id = pdev->device; 2028 2029 switch (dev_id) { 2030 case HNAE3_DEV_ID_GE: 2031 case HNAE3_DEV_ID_25GE: 2032 case HNAE3_DEV_ID_25GE_RDMA: 2033 case HNAE3_DEV_ID_25GE_RDMA_MACSEC: 2034 case HNAE3_DEV_ID_50GE_RDMA: 2035 case HNAE3_DEV_ID_50GE_RDMA_MACSEC: 2036 case HNAE3_DEV_ID_100G_RDMA_MACSEC: 2037 return true; 2038 case HNAE3_DEV_ID_100G_VF: 2039 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: 2040 return false; 2041 default: 2042 dev_warn(&pdev->dev, "un-recognized pci device-id %u", 2043 dev_id); 2044 } 2045 2046 return false; 2047 } 2048 2049 static void hns3_disable_sriov(struct pci_dev *pdev) 2050 { 2051 /* If our VFs are assigned we cannot shut down SR-IOV 2052 * without causing issues, so just leave the hardware 2053 * available but disabled 2054 */ 2055 if (pci_vfs_assigned(pdev)) { 2056 dev_warn(&pdev->dev, 2057 "disabling driver while VFs are assigned\n"); 2058 return; 2059 } 2060 2061 pci_disable_sriov(pdev); 2062 } 2063 2064 static void hns3_get_dev_capability(struct pci_dev *pdev, 2065 struct hnae3_ae_dev *ae_dev) 2066 { 2067 if (pdev->revision >= 0x21) { 2068 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1); 2069 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1); 2070 } 2071 } 2072 2073 /* hns3_probe - Device initialization routine 2074 * @pdev: PCI device information struct 2075 * @ent: entry in hns3_pci_tbl 2076 * 2077 * hns3_probe initializes a PF identified by a pci_dev structure. 2078 * The OS initialization, configuring of the PF private structure, 2079 * and a hardware reset occur. 2080 * 2081 * Returns 0 on success, negative on failure 2082 */ 2083 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2084 { 2085 struct hnae3_ae_dev *ae_dev; 2086 int ret; 2087 2088 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); 2089 if (!ae_dev) 2090 return -ENOMEM; 2091 2092 ae_dev->pdev = pdev; 2093 ae_dev->flag = ent->driver_data; 2094 hns3_get_dev_capability(pdev, ae_dev); 2095 pci_set_drvdata(pdev, ae_dev); 2096 2097 ret = hnae3_register_ae_dev(ae_dev); 2098 if (ret) 2099 pci_set_drvdata(pdev, NULL); 2100 2101 return ret; 2102 } 2103 2104 /* hns3_remove - Device removal routine 2105 * @pdev: PCI device information struct 2106 */ 2107 static void hns3_remove(struct pci_dev *pdev) 2108 { 2109 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2110 2111 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) 2112 hns3_disable_sriov(pdev); 2113 2114 hnae3_unregister_ae_dev(ae_dev); 2115 pci_set_drvdata(pdev, NULL); 2116 } 2117 2118 /** 2119 * hns3_pci_sriov_configure 2120 * @pdev: pointer to a pci_dev structure 2121 * @num_vfs: number of VFs to allocate 2122 * 2123 * Enable or change the number of VFs. Called when the user updates the number 2124 * of VFs in sysfs. 2125 **/ 2126 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 2127 { 2128 int ret; 2129 2130 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { 2131 dev_warn(&pdev->dev, "Can not config SRIOV\n"); 2132 return -EINVAL; 2133 } 2134 2135 if (num_vfs) { 2136 ret = pci_enable_sriov(pdev, num_vfs); 2137 if (ret) 2138 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); 2139 else 2140 return num_vfs; 2141 } else if (!pci_vfs_assigned(pdev)) { 2142 pci_disable_sriov(pdev); 2143 } else { 2144 dev_warn(&pdev->dev, 2145 "Unable to free VFs because some are assigned to VMs.\n"); 2146 } 2147 2148 return 0; 2149 } 2150 2151 static void hns3_shutdown(struct pci_dev *pdev) 2152 { 2153 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2154 2155 hnae3_unregister_ae_dev(ae_dev); 2156 pci_set_drvdata(pdev, NULL); 2157 2158 if (system_state == SYSTEM_POWER_OFF) 2159 pci_set_power_state(pdev, PCI_D3hot); 2160 } 2161 2162 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, 2163 pci_channel_state_t state) 2164 { 2165 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2166 pci_ers_result_t ret; 2167 2168 dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); 2169 2170 if (state == pci_channel_io_perm_failure) 2171 return PCI_ERS_RESULT_DISCONNECT; 2172 2173 if (!ae_dev || !ae_dev->ops) { 2174 dev_err(&pdev->dev, 2175 "Can't recover - error happened before device initialized\n"); 2176 return PCI_ERS_RESULT_NONE; 2177 } 2178 2179 if (ae_dev->ops->handle_hw_ras_error) 2180 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); 2181 else 2182 return PCI_ERS_RESULT_NONE; 2183 2184 return ret; 2185 } 2186 2187 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) 2188 { 2189 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2190 const struct hnae3_ae_ops *ops; 2191 enum hnae3_reset_type reset_type; 2192 struct device *dev = &pdev->dev; 2193 2194 if (!ae_dev || !ae_dev->ops) 2195 return PCI_ERS_RESULT_NONE; 2196 2197 ops = ae_dev->ops; 2198 /* request the reset */ 2199 if (ops->reset_event && ops->get_reset_level && 2200 ops->set_default_reset_request) { 2201 if (ae_dev->hw_err_reset_req) { 2202 reset_type = ops->get_reset_level(ae_dev, 2203 &ae_dev->hw_err_reset_req); 2204 ops->set_default_reset_request(ae_dev, reset_type); 2205 dev_info(dev, "requesting reset due to PCI error\n"); 2206 ops->reset_event(pdev, NULL); 2207 } 2208 2209 return PCI_ERS_RESULT_RECOVERED; 2210 } 2211 2212 return PCI_ERS_RESULT_DISCONNECT; 2213 } 2214 2215 static void hns3_reset_prepare(struct pci_dev *pdev) 2216 { 2217 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2218 2219 dev_info(&pdev->dev, "FLR prepare\n"); 2220 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) 2221 ae_dev->ops->flr_prepare(ae_dev); 2222 } 2223 2224 static void hns3_reset_done(struct pci_dev *pdev) 2225 { 2226 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2227 2228 dev_info(&pdev->dev, "FLR done\n"); 2229 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) 2230 ae_dev->ops->flr_done(ae_dev); 2231 } 2232 2233 static const struct pci_error_handlers hns3_err_handler = { 2234 .error_detected = hns3_error_detected, 2235 .slot_reset = hns3_slot_reset, 2236 .reset_prepare = hns3_reset_prepare, 2237 .reset_done = hns3_reset_done, 2238 }; 2239 2240 static struct pci_driver hns3_driver = { 2241 .name = hns3_driver_name, 2242 .id_table = hns3_pci_tbl, 2243 .probe = hns3_probe, 2244 .remove = hns3_remove, 2245 .shutdown = hns3_shutdown, 2246 .sriov_configure = hns3_pci_sriov_configure, 2247 .err_handler = &hns3_err_handler, 2248 }; 2249 2250 /* set default feature to hns3 */ 2251 static void hns3_set_default_feature(struct net_device *netdev) 2252 { 2253 struct hnae3_handle *h = hns3_get_handle(netdev); 2254 struct pci_dev *pdev = h->pdev; 2255 2256 netdev->priv_flags |= IFF_UNICAST_FLT; 2257 2258 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2259 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2260 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2261 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2262 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | 2263 NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; 2264 2265 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 2266 2267 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2268 NETIF_F_HW_VLAN_CTAG_FILTER | 2269 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2270 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2271 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2272 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2273 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | 2274 NETIF_F_FRAGLIST; 2275 2276 netdev->vlan_features |= 2277 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | 2278 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | 2279 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2280 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2281 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | 2282 NETIF_F_FRAGLIST; 2283 2284 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2285 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2286 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2287 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2288 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2289 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | 2290 NETIF_F_FRAGLIST; 2291 2292 if (pdev->revision >= 0x21) { 2293 netdev->hw_features |= NETIF_F_GRO_HW; 2294 netdev->features |= NETIF_F_GRO_HW; 2295 2296 if (!(h->flags & HNAE3_SUPPORT_VF)) { 2297 netdev->hw_features |= NETIF_F_NTUPLE; 2298 netdev->features |= NETIF_F_NTUPLE; 2299 } 2300 } 2301 } 2302 2303 static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 2304 struct hns3_desc_cb *cb) 2305 { 2306 unsigned int order = hns3_page_order(ring); 2307 struct page *p; 2308 2309 p = dev_alloc_pages(order); 2310 if (!p) 2311 return -ENOMEM; 2312 2313 cb->priv = p; 2314 cb->page_offset = 0; 2315 cb->reuse_flag = 0; 2316 cb->buf = page_address(p); 2317 cb->length = hns3_page_size(ring); 2318 cb->type = DESC_TYPE_PAGE; 2319 2320 return 0; 2321 } 2322 2323 static void hns3_free_buffer(struct hns3_enet_ring *ring, 2324 struct hns3_desc_cb *cb) 2325 { 2326 if (cb->type == DESC_TYPE_SKB) 2327 dev_kfree_skb_any((struct sk_buff *)cb->priv); 2328 else if (!HNAE3_IS_TX_RING(ring)) 2329 put_page((struct page *)cb->priv); 2330 memset(cb, 0, sizeof(*cb)); 2331 } 2332 2333 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) 2334 { 2335 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 2336 cb->length, ring_to_dma_dir(ring)); 2337 2338 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) 2339 return -EIO; 2340 2341 return 0; 2342 } 2343 2344 static void hns3_unmap_buffer(struct hns3_enet_ring *ring, 2345 struct hns3_desc_cb *cb) 2346 { 2347 if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB) 2348 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 2349 ring_to_dma_dir(ring)); 2350 else if (cb->length) 2351 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 2352 ring_to_dma_dir(ring)); 2353 } 2354 2355 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) 2356 { 2357 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2358 ring->desc[i].addr = 0; 2359 } 2360 2361 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) 2362 { 2363 struct hns3_desc_cb *cb = &ring->desc_cb[i]; 2364 2365 if (!ring->desc_cb[i].dma) 2366 return; 2367 2368 hns3_buffer_detach(ring, i); 2369 hns3_free_buffer(ring, cb); 2370 } 2371 2372 static void hns3_free_buffers(struct hns3_enet_ring *ring) 2373 { 2374 int i; 2375 2376 for (i = 0; i < ring->desc_num; i++) 2377 hns3_free_buffer_detach(ring, i); 2378 } 2379 2380 /* free desc along with its attached buffer */ 2381 static void hns3_free_desc(struct hns3_enet_ring *ring) 2382 { 2383 int size = ring->desc_num * sizeof(ring->desc[0]); 2384 2385 hns3_free_buffers(ring); 2386 2387 if (ring->desc) { 2388 dma_free_coherent(ring_to_dev(ring), size, 2389 ring->desc, ring->desc_dma_addr); 2390 ring->desc = NULL; 2391 } 2392 } 2393 2394 static int hns3_alloc_desc(struct hns3_enet_ring *ring) 2395 { 2396 int size = ring->desc_num * sizeof(ring->desc[0]); 2397 2398 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, 2399 &ring->desc_dma_addr, GFP_KERNEL); 2400 if (!ring->desc) 2401 return -ENOMEM; 2402 2403 return 0; 2404 } 2405 2406 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, 2407 struct hns3_desc_cb *cb) 2408 { 2409 int ret; 2410 2411 ret = hns3_alloc_buffer(ring, cb); 2412 if (ret) 2413 goto out; 2414 2415 ret = hns3_map_buffer(ring, cb); 2416 if (ret) 2417 goto out_with_buf; 2418 2419 return 0; 2420 2421 out_with_buf: 2422 hns3_free_buffer(ring, cb); 2423 out: 2424 return ret; 2425 } 2426 2427 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) 2428 { 2429 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); 2430 2431 if (ret) 2432 return ret; 2433 2434 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2435 2436 return 0; 2437 } 2438 2439 /* Allocate memory for raw pkg, and map with dma */ 2440 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) 2441 { 2442 int i, j, ret; 2443 2444 for (i = 0; i < ring->desc_num; i++) { 2445 ret = hns3_alloc_and_attach_buffer(ring, i); 2446 if (ret) 2447 goto out_buffer_fail; 2448 } 2449 2450 return 0; 2451 2452 out_buffer_fail: 2453 for (j = i - 1; j >= 0; j--) 2454 hns3_free_buffer_detach(ring, j); 2455 return ret; 2456 } 2457 2458 /* detach a in-used buffer and replace with a reserved one */ 2459 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, 2460 struct hns3_desc_cb *res_cb) 2461 { 2462 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2463 ring->desc_cb[i] = *res_cb; 2464 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2465 ring->desc[i].rx.bd_base_info = 0; 2466 } 2467 2468 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) 2469 { 2470 ring->desc_cb[i].reuse_flag = 0; 2471 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + 2472 ring->desc_cb[i].page_offset); 2473 ring->desc[i].rx.bd_base_info = 0; 2474 2475 dma_sync_single_for_device(ring_to_dev(ring), 2476 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, 2477 hns3_buf_size(ring), 2478 DMA_FROM_DEVICE); 2479 } 2480 2481 static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head, 2482 int *bytes, int *pkts) 2483 { 2484 int ntc = ring->next_to_clean; 2485 struct hns3_desc_cb *desc_cb; 2486 2487 while (head != ntc) { 2488 desc_cb = &ring->desc_cb[ntc]; 2489 (*pkts) += (desc_cb->type == DESC_TYPE_SKB); 2490 (*bytes) += desc_cb->length; 2491 /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ 2492 hns3_free_buffer_detach(ring, ntc); 2493 2494 if (++ntc == ring->desc_num) 2495 ntc = 0; 2496 2497 /* Issue prefetch for next Tx descriptor */ 2498 prefetch(&ring->desc_cb[ntc]); 2499 } 2500 2501 /* This smp_store_release() pairs with smp_load_acquire() in 2502 * ring_space called by hns3_nic_net_xmit. 2503 */ 2504 smp_store_release(&ring->next_to_clean, ntc); 2505 } 2506 2507 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) 2508 { 2509 int u = ring->next_to_use; 2510 int c = ring->next_to_clean; 2511 2512 if (unlikely(h > ring->desc_num)) 2513 return 0; 2514 2515 return u > c ? (h > c && h <= u) : (h > c || h <= u); 2516 } 2517 2518 void hns3_clean_tx_ring(struct hns3_enet_ring *ring) 2519 { 2520 struct net_device *netdev = ring_to_netdev(ring); 2521 struct hns3_nic_priv *priv = netdev_priv(netdev); 2522 struct netdev_queue *dev_queue; 2523 int bytes, pkts; 2524 int head; 2525 2526 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); 2527 2528 if (is_ring_empty(ring) || head == ring->next_to_clean) 2529 return; /* no data to poll */ 2530 2531 rmb(); /* Make sure head is ready before touch any data */ 2532 2533 if (unlikely(!is_valid_clean_head(ring, head))) { 2534 hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head, 2535 ring->next_to_use, ring->next_to_clean); 2536 2537 u64_stats_update_begin(&ring->syncp); 2538 ring->stats.io_err_cnt++; 2539 u64_stats_update_end(&ring->syncp); 2540 return; 2541 } 2542 2543 bytes = 0; 2544 pkts = 0; 2545 hns3_nic_reclaim_desc(ring, head, &bytes, &pkts); 2546 2547 ring->tqp_vector->tx_group.total_bytes += bytes; 2548 ring->tqp_vector->tx_group.total_packets += pkts; 2549 2550 u64_stats_update_begin(&ring->syncp); 2551 ring->stats.tx_bytes += bytes; 2552 ring->stats.tx_pkts += pkts; 2553 u64_stats_update_end(&ring->syncp); 2554 2555 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 2556 netdev_tx_completed_queue(dev_queue, pkts, bytes); 2557 2558 if (unlikely(netif_carrier_ok(netdev) && 2559 ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { 2560 /* Make sure that anybody stopping the queue after this 2561 * sees the new next_to_clean. 2562 */ 2563 smp_mb(); 2564 if (netif_tx_queue_stopped(dev_queue) && 2565 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 2566 netif_tx_wake_queue(dev_queue); 2567 ring->stats.restart_queue++; 2568 } 2569 } 2570 } 2571 2572 static int hns3_desc_unused(struct hns3_enet_ring *ring) 2573 { 2574 int ntc = ring->next_to_clean; 2575 int ntu = ring->next_to_use; 2576 2577 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; 2578 } 2579 2580 static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, 2581 int cleand_count) 2582 { 2583 struct hns3_desc_cb *desc_cb; 2584 struct hns3_desc_cb res_cbs; 2585 int i, ret; 2586 2587 for (i = 0; i < cleand_count; i++) { 2588 desc_cb = &ring->desc_cb[ring->next_to_use]; 2589 if (desc_cb->reuse_flag) { 2590 u64_stats_update_begin(&ring->syncp); 2591 ring->stats.reuse_pg_cnt++; 2592 u64_stats_update_end(&ring->syncp); 2593 2594 hns3_reuse_buffer(ring, ring->next_to_use); 2595 } else { 2596 ret = hns3_alloc_and_map_buffer(ring, &res_cbs); 2597 if (ret) { 2598 u64_stats_update_begin(&ring->syncp); 2599 ring->stats.sw_err_cnt++; 2600 u64_stats_update_end(&ring->syncp); 2601 2602 hns3_rl_err(ring_to_netdev(ring), 2603 "alloc rx buffer failed: %d\n", 2604 ret); 2605 break; 2606 } 2607 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 2608 2609 u64_stats_update_begin(&ring->syncp); 2610 ring->stats.non_reuse_pg++; 2611 u64_stats_update_end(&ring->syncp); 2612 } 2613 2614 ring_ptr_move_fw(ring, next_to_use); 2615 } 2616 2617 wmb(); /* Make all data has been write before submit */ 2618 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 2619 } 2620 2621 static bool hns3_page_is_reusable(struct page *page) 2622 { 2623 return page_to_nid(page) == numa_mem_id() && 2624 !page_is_pfmemalloc(page); 2625 } 2626 2627 static void hns3_nic_reuse_page(struct sk_buff *skb, int i, 2628 struct hns3_enet_ring *ring, int pull_len, 2629 struct hns3_desc_cb *desc_cb) 2630 { 2631 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 2632 int size = le16_to_cpu(desc->rx.size); 2633 u32 truesize = hns3_buf_size(ring); 2634 2635 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 2636 size - pull_len, truesize); 2637 2638 /* Avoid re-using remote pages, or the stack is still using the page 2639 * when page_offset rollback to zero, flag default unreuse 2640 */ 2641 if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) || 2642 (!desc_cb->page_offset && page_count(desc_cb->priv) > 1)) 2643 return; 2644 2645 /* Move offset up to the next cache line */ 2646 desc_cb->page_offset += truesize; 2647 2648 if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) { 2649 desc_cb->reuse_flag = 1; 2650 /* Bump ref count on page before it is given */ 2651 get_page(desc_cb->priv); 2652 } else if (page_count(desc_cb->priv) == 1) { 2653 desc_cb->reuse_flag = 1; 2654 desc_cb->page_offset = 0; 2655 get_page(desc_cb->priv); 2656 } 2657 } 2658 2659 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) 2660 { 2661 __be16 type = skb->protocol; 2662 struct tcphdr *th; 2663 int depth = 0; 2664 2665 while (eth_type_vlan(type)) { 2666 struct vlan_hdr *vh; 2667 2668 if ((depth + VLAN_HLEN) > skb_headlen(skb)) 2669 return -EFAULT; 2670 2671 vh = (struct vlan_hdr *)(skb->data + depth); 2672 type = vh->h_vlan_encapsulated_proto; 2673 depth += VLAN_HLEN; 2674 } 2675 2676 skb_set_network_header(skb, depth); 2677 2678 if (type == htons(ETH_P_IP)) { 2679 const struct iphdr *iph = ip_hdr(skb); 2680 2681 depth += sizeof(struct iphdr); 2682 skb_set_transport_header(skb, depth); 2683 th = tcp_hdr(skb); 2684 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, 2685 iph->daddr, 0); 2686 } else if (type == htons(ETH_P_IPV6)) { 2687 const struct ipv6hdr *iph = ipv6_hdr(skb); 2688 2689 depth += sizeof(struct ipv6hdr); 2690 skb_set_transport_header(skb, depth); 2691 th = tcp_hdr(skb); 2692 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, 2693 &iph->daddr, 0); 2694 } else { 2695 hns3_rl_err(skb->dev, 2696 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", 2697 be16_to_cpu(type), depth); 2698 return -EFAULT; 2699 } 2700 2701 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 2702 if (th->cwr) 2703 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 2704 2705 if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) 2706 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; 2707 2708 skb->csum_start = (unsigned char *)th - skb->head; 2709 skb->csum_offset = offsetof(struct tcphdr, check); 2710 skb->ip_summed = CHECKSUM_PARTIAL; 2711 2712 trace_hns3_gro(skb); 2713 2714 return 0; 2715 } 2716 2717 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, 2718 u32 l234info, u32 bd_base_info, u32 ol_info) 2719 { 2720 struct net_device *netdev = ring_to_netdev(ring); 2721 int l3_type, l4_type; 2722 int ol4_type; 2723 2724 skb->ip_summed = CHECKSUM_NONE; 2725 2726 skb_checksum_none_assert(skb); 2727 2728 if (!(netdev->features & NETIF_F_RXCSUM)) 2729 return; 2730 2731 /* check if hardware has done checksum */ 2732 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) 2733 return; 2734 2735 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | 2736 BIT(HNS3_RXD_OL3E_B) | 2737 BIT(HNS3_RXD_OL4E_B)))) { 2738 u64_stats_update_begin(&ring->syncp); 2739 ring->stats.l3l4_csum_err++; 2740 u64_stats_update_end(&ring->syncp); 2741 2742 return; 2743 } 2744 2745 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, 2746 HNS3_RXD_OL4ID_S); 2747 switch (ol4_type) { 2748 case HNS3_OL4_TYPE_MAC_IN_UDP: 2749 case HNS3_OL4_TYPE_NVGRE: 2750 skb->csum_level = 1; 2751 fallthrough; 2752 case HNS3_OL4_TYPE_NO_TUN: 2753 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, 2754 HNS3_RXD_L3ID_S); 2755 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, 2756 HNS3_RXD_L4ID_S); 2757 2758 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ 2759 if ((l3_type == HNS3_L3_TYPE_IPV4 || 2760 l3_type == HNS3_L3_TYPE_IPV6) && 2761 (l4_type == HNS3_L4_TYPE_UDP || 2762 l4_type == HNS3_L4_TYPE_TCP || 2763 l4_type == HNS3_L4_TYPE_SCTP)) 2764 skb->ip_summed = CHECKSUM_UNNECESSARY; 2765 break; 2766 default: 2767 break; 2768 } 2769 } 2770 2771 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) 2772 { 2773 if (skb_has_frag_list(skb)) 2774 napi_gro_flush(&ring->tqp_vector->napi, false); 2775 2776 napi_gro_receive(&ring->tqp_vector->napi, skb); 2777 } 2778 2779 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, 2780 struct hns3_desc *desc, u32 l234info, 2781 u16 *vlan_tag) 2782 { 2783 struct hnae3_handle *handle = ring->tqp->handle; 2784 struct pci_dev *pdev = ring->tqp->handle->pdev; 2785 2786 if (pdev->revision == 0x20) { 2787 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2788 if (!(*vlan_tag & VLAN_VID_MASK)) 2789 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2790 2791 return (*vlan_tag != 0); 2792 } 2793 2794 #define HNS3_STRP_OUTER_VLAN 0x1 2795 #define HNS3_STRP_INNER_VLAN 0x2 2796 #define HNS3_STRP_BOTH 0x3 2797 2798 /* Hardware always insert VLAN tag into RX descriptor when 2799 * remove the tag from packet, driver needs to determine 2800 * reporting which tag to stack. 2801 */ 2802 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, 2803 HNS3_RXD_STRP_TAGP_S)) { 2804 case HNS3_STRP_OUTER_VLAN: 2805 if (handle->port_base_vlan_state != 2806 HNAE3_PORT_BASE_VLAN_DISABLE) 2807 return false; 2808 2809 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2810 return true; 2811 case HNS3_STRP_INNER_VLAN: 2812 if (handle->port_base_vlan_state != 2813 HNAE3_PORT_BASE_VLAN_DISABLE) 2814 return false; 2815 2816 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2817 return true; 2818 case HNS3_STRP_BOTH: 2819 if (handle->port_base_vlan_state == 2820 HNAE3_PORT_BASE_VLAN_DISABLE) 2821 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 2822 else 2823 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 2824 2825 return true; 2826 default: 2827 return false; 2828 } 2829 } 2830 2831 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, 2832 unsigned char *va) 2833 { 2834 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; 2835 struct net_device *netdev = ring_to_netdev(ring); 2836 struct sk_buff *skb; 2837 2838 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); 2839 skb = ring->skb; 2840 if (unlikely(!skb)) { 2841 hns3_rl_err(netdev, "alloc rx skb fail\n"); 2842 2843 u64_stats_update_begin(&ring->syncp); 2844 ring->stats.sw_err_cnt++; 2845 u64_stats_update_end(&ring->syncp); 2846 2847 return -ENOMEM; 2848 } 2849 2850 trace_hns3_rx_desc(ring); 2851 prefetchw(skb->data); 2852 2853 ring->pending_buf = 1; 2854 ring->frag_num = 0; 2855 ring->tail_skb = NULL; 2856 if (length <= HNS3_RX_HEAD_SIZE) { 2857 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 2858 2859 /* We can reuse buffer as-is, just make sure it is local */ 2860 if (likely(hns3_page_is_reusable(desc_cb->priv))) 2861 desc_cb->reuse_flag = 1; 2862 else /* This page cannot be reused so discard it */ 2863 put_page(desc_cb->priv); 2864 2865 ring_ptr_move_fw(ring, next_to_clean); 2866 return 0; 2867 } 2868 u64_stats_update_begin(&ring->syncp); 2869 ring->stats.seg_pkt_cnt++; 2870 u64_stats_update_end(&ring->syncp); 2871 2872 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); 2873 __skb_put(skb, ring->pull_len); 2874 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, 2875 desc_cb); 2876 ring_ptr_move_fw(ring, next_to_clean); 2877 2878 return 0; 2879 } 2880 2881 static int hns3_add_frag(struct hns3_enet_ring *ring) 2882 { 2883 struct sk_buff *skb = ring->skb; 2884 struct sk_buff *head_skb = skb; 2885 struct sk_buff *new_skb; 2886 struct hns3_desc_cb *desc_cb; 2887 struct hns3_desc *desc; 2888 u32 bd_base_info; 2889 2890 do { 2891 desc = &ring->desc[ring->next_to_clean]; 2892 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2893 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2894 /* make sure HW write desc complete */ 2895 dma_rmb(); 2896 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) 2897 return -ENXIO; 2898 2899 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { 2900 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); 2901 if (unlikely(!new_skb)) { 2902 hns3_rl_err(ring_to_netdev(ring), 2903 "alloc rx fraglist skb fail\n"); 2904 return -ENXIO; 2905 } 2906 ring->frag_num = 0; 2907 2908 if (ring->tail_skb) { 2909 ring->tail_skb->next = new_skb; 2910 ring->tail_skb = new_skb; 2911 } else { 2912 skb_shinfo(skb)->frag_list = new_skb; 2913 ring->tail_skb = new_skb; 2914 } 2915 } 2916 2917 if (ring->tail_skb) { 2918 head_skb->truesize += hns3_buf_size(ring); 2919 head_skb->data_len += le16_to_cpu(desc->rx.size); 2920 head_skb->len += le16_to_cpu(desc->rx.size); 2921 skb = ring->tail_skb; 2922 } 2923 2924 dma_sync_single_for_cpu(ring_to_dev(ring), 2925 desc_cb->dma + desc_cb->page_offset, 2926 hns3_buf_size(ring), 2927 DMA_FROM_DEVICE); 2928 2929 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); 2930 trace_hns3_rx_desc(ring); 2931 ring_ptr_move_fw(ring, next_to_clean); 2932 ring->pending_buf++; 2933 } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); 2934 2935 return 0; 2936 } 2937 2938 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, 2939 struct sk_buff *skb, u32 l234info, 2940 u32 bd_base_info, u32 ol_info) 2941 { 2942 u32 l3_type; 2943 2944 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, 2945 HNS3_RXD_GRO_SIZE_M, 2946 HNS3_RXD_GRO_SIZE_S); 2947 /* if there is no HW GRO, do not set gro params */ 2948 if (!skb_shinfo(skb)->gso_size) { 2949 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info); 2950 return 0; 2951 } 2952 2953 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, 2954 HNS3_RXD_GRO_COUNT_M, 2955 HNS3_RXD_GRO_COUNT_S); 2956 2957 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); 2958 if (l3_type == HNS3_L3_TYPE_IPV4) 2959 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 2960 else if (l3_type == HNS3_L3_TYPE_IPV6) 2961 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 2962 else 2963 return -EFAULT; 2964 2965 return hns3_gro_complete(skb, l234info); 2966 } 2967 2968 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, 2969 struct sk_buff *skb, u32 rss_hash) 2970 { 2971 struct hnae3_handle *handle = ring->tqp->handle; 2972 enum pkt_hash_types rss_type; 2973 2974 if (rss_hash) 2975 rss_type = handle->kinfo.rss_type; 2976 else 2977 rss_type = PKT_HASH_TYPE_NONE; 2978 2979 skb_set_hash(skb, rss_hash, rss_type); 2980 } 2981 2982 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) 2983 { 2984 struct net_device *netdev = ring_to_netdev(ring); 2985 enum hns3_pkt_l2t_type l2_frame_type; 2986 u32 bd_base_info, l234info, ol_info; 2987 struct hns3_desc *desc; 2988 unsigned int len; 2989 int pre_ntc, ret; 2990 2991 /* bdinfo handled below is only valid on the last BD of the 2992 * current packet, and ring->next_to_clean indicates the first 2993 * descriptor of next packet, so need - 1 below. 2994 */ 2995 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : 2996 (ring->desc_num - 1); 2997 desc = &ring->desc[pre_ntc]; 2998 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2999 l234info = le32_to_cpu(desc->rx.l234_info); 3000 ol_info = le32_to_cpu(desc->rx.ol_info); 3001 3002 /* Based on hw strategy, the tag offloaded will be stored at 3003 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 3004 * in one layer tag case. 3005 */ 3006 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 3007 u16 vlan_tag; 3008 3009 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) 3010 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3011 vlan_tag); 3012 } 3013 3014 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | 3015 BIT(HNS3_RXD_L2E_B))))) { 3016 u64_stats_update_begin(&ring->syncp); 3017 if (l234info & BIT(HNS3_RXD_L2E_B)) 3018 ring->stats.l2_err++; 3019 else 3020 ring->stats.err_pkt_len++; 3021 u64_stats_update_end(&ring->syncp); 3022 3023 return -EFAULT; 3024 } 3025 3026 len = skb->len; 3027 3028 /* Do update ip stack process */ 3029 skb->protocol = eth_type_trans(skb, netdev); 3030 3031 /* This is needed in order to enable forwarding support */ 3032 ret = hns3_set_gro_and_checksum(ring, skb, l234info, 3033 bd_base_info, ol_info); 3034 if (unlikely(ret)) { 3035 u64_stats_update_begin(&ring->syncp); 3036 ring->stats.rx_err_cnt++; 3037 u64_stats_update_end(&ring->syncp); 3038 return ret; 3039 } 3040 3041 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, 3042 HNS3_RXD_DMAC_S); 3043 3044 u64_stats_update_begin(&ring->syncp); 3045 ring->stats.rx_pkts++; 3046 ring->stats.rx_bytes += len; 3047 3048 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) 3049 ring->stats.rx_multicast++; 3050 3051 u64_stats_update_end(&ring->syncp); 3052 3053 ring->tqp_vector->rx_group.total_bytes += len; 3054 3055 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); 3056 return 0; 3057 } 3058 3059 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) 3060 { 3061 struct sk_buff *skb = ring->skb; 3062 struct hns3_desc_cb *desc_cb; 3063 struct hns3_desc *desc; 3064 unsigned int length; 3065 u32 bd_base_info; 3066 int ret; 3067 3068 desc = &ring->desc[ring->next_to_clean]; 3069 desc_cb = &ring->desc_cb[ring->next_to_clean]; 3070 3071 prefetch(desc); 3072 3073 length = le16_to_cpu(desc->rx.size); 3074 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3075 3076 /* Check valid BD */ 3077 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) 3078 return -ENXIO; 3079 3080 if (!skb) { 3081 ring->va = desc_cb->buf + desc_cb->page_offset; 3082 3083 dma_sync_single_for_cpu(ring_to_dev(ring), 3084 desc_cb->dma + desc_cb->page_offset, 3085 hns3_buf_size(ring), 3086 DMA_FROM_DEVICE); 3087 } 3088 3089 /* Prefetch first cache line of first page 3090 * Idea is to cache few bytes of the header of the packet. Our L1 Cache 3091 * line size is 64B so need to prefetch twice to make it 128B. But in 3092 * actual we can have greater size of caches with 128B Level 1 cache 3093 * lines. In such a case, single fetch would suffice to cache in the 3094 * relevant part of the header. 3095 */ 3096 prefetch(ring->va); 3097 #if L1_CACHE_BYTES < 128 3098 prefetch(ring->va + L1_CACHE_BYTES); 3099 #endif 3100 3101 if (!skb) { 3102 ret = hns3_alloc_skb(ring, length, ring->va); 3103 skb = ring->skb; 3104 3105 if (ret < 0) /* alloc buffer fail */ 3106 return ret; 3107 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */ 3108 ret = hns3_add_frag(ring); 3109 if (ret) 3110 return ret; 3111 } 3112 } else { 3113 ret = hns3_add_frag(ring); 3114 if (ret) 3115 return ret; 3116 } 3117 3118 /* As the head data may be changed when GRO enable, copy 3119 * the head data in after other data rx completed 3120 */ 3121 if (skb->len > HNS3_RX_HEAD_SIZE) 3122 memcpy(skb->data, ring->va, 3123 ALIGN(ring->pull_len, sizeof(long))); 3124 3125 ret = hns3_handle_bdinfo(ring, skb); 3126 if (unlikely(ret)) { 3127 dev_kfree_skb_any(skb); 3128 return ret; 3129 } 3130 3131 skb_record_rx_queue(skb, ring->tqp->tqp_index); 3132 return 0; 3133 } 3134 3135 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, 3136 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) 3137 { 3138 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 3139 int unused_count = hns3_desc_unused(ring); 3140 int recv_pkts = 0; 3141 int recv_bds = 0; 3142 int err, num; 3143 3144 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); 3145 num -= unused_count; 3146 unused_count -= ring->pending_buf; 3147 3148 if (num <= 0) 3149 goto out; 3150 3151 rmb(); /* Make sure num taken effect before the other data is touched */ 3152 3153 while (recv_pkts < budget && recv_bds < num) { 3154 /* Reuse or realloc buffers */ 3155 if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { 3156 hns3_nic_alloc_rx_buffers(ring, unused_count); 3157 unused_count = hns3_desc_unused(ring) - 3158 ring->pending_buf; 3159 } 3160 3161 /* Poll one pkt */ 3162 err = hns3_handle_rx_bd(ring); 3163 /* Do not get FE for the packet or failed to alloc skb */ 3164 if (unlikely(!ring->skb || err == -ENXIO)) { 3165 goto out; 3166 } else if (likely(!err)) { 3167 rx_fn(ring, ring->skb); 3168 recv_pkts++; 3169 } 3170 3171 recv_bds += ring->pending_buf; 3172 unused_count += ring->pending_buf; 3173 ring->skb = NULL; 3174 ring->pending_buf = 0; 3175 } 3176 3177 out: 3178 /* Make all data has been write before submit */ 3179 if (unused_count > 0) 3180 hns3_nic_alloc_rx_buffers(ring, unused_count); 3181 3182 return recv_pkts; 3183 } 3184 3185 static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group) 3186 { 3187 #define HNS3_RX_LOW_BYTE_RATE 10000 3188 #define HNS3_RX_MID_BYTE_RATE 20000 3189 #define HNS3_RX_ULTRA_PACKET_RATE 40 3190 3191 enum hns3_flow_level_range new_flow_level; 3192 struct hns3_enet_tqp_vector *tqp_vector; 3193 int packets_per_msecs, bytes_per_msecs; 3194 u32 time_passed_ms; 3195 3196 tqp_vector = ring_group->ring->tqp_vector; 3197 time_passed_ms = 3198 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); 3199 if (!time_passed_ms) 3200 return false; 3201 3202 do_div(ring_group->total_packets, time_passed_ms); 3203 packets_per_msecs = ring_group->total_packets; 3204 3205 do_div(ring_group->total_bytes, time_passed_ms); 3206 bytes_per_msecs = ring_group->total_bytes; 3207 3208 new_flow_level = ring_group->coal.flow_level; 3209 3210 /* Simple throttlerate management 3211 * 0-10MB/s lower (50000 ints/s) 3212 * 10-20MB/s middle (20000 ints/s) 3213 * 20-1249MB/s high (18000 ints/s) 3214 * > 40000pps ultra (8000 ints/s) 3215 */ 3216 switch (new_flow_level) { 3217 case HNS3_FLOW_LOW: 3218 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) 3219 new_flow_level = HNS3_FLOW_MID; 3220 break; 3221 case HNS3_FLOW_MID: 3222 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) 3223 new_flow_level = HNS3_FLOW_HIGH; 3224 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) 3225 new_flow_level = HNS3_FLOW_LOW; 3226 break; 3227 case HNS3_FLOW_HIGH: 3228 case HNS3_FLOW_ULTRA: 3229 default: 3230 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) 3231 new_flow_level = HNS3_FLOW_MID; 3232 break; 3233 } 3234 3235 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && 3236 &tqp_vector->rx_group == ring_group) 3237 new_flow_level = HNS3_FLOW_ULTRA; 3238 3239 ring_group->total_bytes = 0; 3240 ring_group->total_packets = 0; 3241 ring_group->coal.flow_level = new_flow_level; 3242 3243 return true; 3244 } 3245 3246 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) 3247 { 3248 struct hns3_enet_tqp_vector *tqp_vector; 3249 u16 new_int_gl; 3250 3251 if (!ring_group->ring) 3252 return false; 3253 3254 tqp_vector = ring_group->ring->tqp_vector; 3255 if (!tqp_vector->last_jiffies) 3256 return false; 3257 3258 if (ring_group->total_packets == 0) { 3259 ring_group->coal.int_gl = HNS3_INT_GL_50K; 3260 ring_group->coal.flow_level = HNS3_FLOW_LOW; 3261 return true; 3262 } 3263 3264 if (!hns3_get_new_flow_lvl(ring_group)) 3265 return false; 3266 3267 new_int_gl = ring_group->coal.int_gl; 3268 switch (ring_group->coal.flow_level) { 3269 case HNS3_FLOW_LOW: 3270 new_int_gl = HNS3_INT_GL_50K; 3271 break; 3272 case HNS3_FLOW_MID: 3273 new_int_gl = HNS3_INT_GL_20K; 3274 break; 3275 case HNS3_FLOW_HIGH: 3276 new_int_gl = HNS3_INT_GL_18K; 3277 break; 3278 case HNS3_FLOW_ULTRA: 3279 new_int_gl = HNS3_INT_GL_8K; 3280 break; 3281 default: 3282 break; 3283 } 3284 3285 if (new_int_gl != ring_group->coal.int_gl) { 3286 ring_group->coal.int_gl = new_int_gl; 3287 return true; 3288 } 3289 return false; 3290 } 3291 3292 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) 3293 { 3294 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 3295 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 3296 bool rx_update, tx_update; 3297 3298 /* update param every 1000ms */ 3299 if (time_before(jiffies, 3300 tqp_vector->last_jiffies + msecs_to_jiffies(1000))) 3301 return; 3302 3303 if (rx_group->coal.gl_adapt_enable) { 3304 rx_update = hns3_get_new_int_gl(rx_group); 3305 if (rx_update) 3306 hns3_set_vector_coalesce_rx_gl(tqp_vector, 3307 rx_group->coal.int_gl); 3308 } 3309 3310 if (tx_group->coal.gl_adapt_enable) { 3311 tx_update = hns3_get_new_int_gl(tx_group); 3312 if (tx_update) 3313 hns3_set_vector_coalesce_tx_gl(tqp_vector, 3314 tx_group->coal.int_gl); 3315 } 3316 3317 tqp_vector->last_jiffies = jiffies; 3318 } 3319 3320 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 3321 { 3322 struct hns3_nic_priv *priv = netdev_priv(napi->dev); 3323 struct hns3_enet_ring *ring; 3324 int rx_pkt_total = 0; 3325 3326 struct hns3_enet_tqp_vector *tqp_vector = 3327 container_of(napi, struct hns3_enet_tqp_vector, napi); 3328 bool clean_complete = true; 3329 int rx_budget = budget; 3330 3331 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3332 napi_complete(napi); 3333 return 0; 3334 } 3335 3336 /* Since the actual Tx work is minimal, we can give the Tx a larger 3337 * budget and be more aggressive about cleaning up the Tx descriptors. 3338 */ 3339 hns3_for_each_ring(ring, tqp_vector->tx_group) 3340 hns3_clean_tx_ring(ring); 3341 3342 /* make sure rx ring budget not smaller than 1 */ 3343 if (tqp_vector->num_tqps > 1) 3344 rx_budget = max(budget / tqp_vector->num_tqps, 1); 3345 3346 hns3_for_each_ring(ring, tqp_vector->rx_group) { 3347 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 3348 hns3_rx_skb); 3349 3350 if (rx_cleaned >= rx_budget) 3351 clean_complete = false; 3352 3353 rx_pkt_total += rx_cleaned; 3354 } 3355 3356 tqp_vector->rx_group.total_packets += rx_pkt_total; 3357 3358 if (!clean_complete) 3359 return budget; 3360 3361 if (napi_complete(napi) && 3362 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3363 hns3_update_new_int_gl(tqp_vector); 3364 hns3_mask_vector_irq(tqp_vector, 1); 3365 } 3366 3367 return rx_pkt_total; 3368 } 3369 3370 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3371 struct hnae3_ring_chain_node *head) 3372 { 3373 struct pci_dev *pdev = tqp_vector->handle->pdev; 3374 struct hnae3_ring_chain_node *cur_chain = head; 3375 struct hnae3_ring_chain_node *chain; 3376 struct hns3_enet_ring *tx_ring; 3377 struct hns3_enet_ring *rx_ring; 3378 3379 tx_ring = tqp_vector->tx_group.ring; 3380 if (tx_ring) { 3381 cur_chain->tqp_index = tx_ring->tqp->tqp_index; 3382 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3383 HNAE3_RING_TYPE_TX); 3384 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3385 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); 3386 3387 cur_chain->next = NULL; 3388 3389 while (tx_ring->next) { 3390 tx_ring = tx_ring->next; 3391 3392 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), 3393 GFP_KERNEL); 3394 if (!chain) 3395 goto err_free_chain; 3396 3397 cur_chain->next = chain; 3398 chain->tqp_index = tx_ring->tqp->tqp_index; 3399 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3400 HNAE3_RING_TYPE_TX); 3401 hnae3_set_field(chain->int_gl_idx, 3402 HNAE3_RING_GL_IDX_M, 3403 HNAE3_RING_GL_IDX_S, 3404 HNAE3_RING_GL_TX); 3405 3406 cur_chain = chain; 3407 } 3408 } 3409 3410 rx_ring = tqp_vector->rx_group.ring; 3411 if (!tx_ring && rx_ring) { 3412 cur_chain->next = NULL; 3413 cur_chain->tqp_index = rx_ring->tqp->tqp_index; 3414 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3415 HNAE3_RING_TYPE_RX); 3416 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3417 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3418 3419 rx_ring = rx_ring->next; 3420 } 3421 3422 while (rx_ring) { 3423 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 3424 if (!chain) 3425 goto err_free_chain; 3426 3427 cur_chain->next = chain; 3428 chain->tqp_index = rx_ring->tqp->tqp_index; 3429 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3430 HNAE3_RING_TYPE_RX); 3431 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3432 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3433 3434 cur_chain = chain; 3435 3436 rx_ring = rx_ring->next; 3437 } 3438 3439 return 0; 3440 3441 err_free_chain: 3442 cur_chain = head->next; 3443 while (cur_chain) { 3444 chain = cur_chain->next; 3445 devm_kfree(&pdev->dev, cur_chain); 3446 cur_chain = chain; 3447 } 3448 head->next = NULL; 3449 3450 return -ENOMEM; 3451 } 3452 3453 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3454 struct hnae3_ring_chain_node *head) 3455 { 3456 struct pci_dev *pdev = tqp_vector->handle->pdev; 3457 struct hnae3_ring_chain_node *chain_tmp, *chain; 3458 3459 chain = head->next; 3460 3461 while (chain) { 3462 chain_tmp = chain->next; 3463 devm_kfree(&pdev->dev, chain); 3464 chain = chain_tmp; 3465 } 3466 } 3467 3468 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 3469 struct hns3_enet_ring *ring) 3470 { 3471 ring->next = group->ring; 3472 group->ring = ring; 3473 3474 group->count++; 3475 } 3476 3477 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) 3478 { 3479 struct pci_dev *pdev = priv->ae_handle->pdev; 3480 struct hns3_enet_tqp_vector *tqp_vector; 3481 int num_vectors = priv->vector_num; 3482 int numa_node; 3483 int vector_i; 3484 3485 numa_node = dev_to_node(&pdev->dev); 3486 3487 for (vector_i = 0; vector_i < num_vectors; vector_i++) { 3488 tqp_vector = &priv->tqp_vector[vector_i]; 3489 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), 3490 &tqp_vector->affinity_mask); 3491 } 3492 } 3493 3494 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 3495 { 3496 struct hnae3_ring_chain_node vector_ring_chain; 3497 struct hnae3_handle *h = priv->ae_handle; 3498 struct hns3_enet_tqp_vector *tqp_vector; 3499 int ret = 0; 3500 int i; 3501 3502 hns3_nic_set_cpumask(priv); 3503 3504 for (i = 0; i < priv->vector_num; i++) { 3505 tqp_vector = &priv->tqp_vector[i]; 3506 hns3_vector_gl_rl_init_hw(tqp_vector, priv); 3507 tqp_vector->num_tqps = 0; 3508 } 3509 3510 for (i = 0; i < h->kinfo.num_tqps; i++) { 3511 u16 vector_i = i % priv->vector_num; 3512 u16 tqp_num = h->kinfo.num_tqps; 3513 3514 tqp_vector = &priv->tqp_vector[vector_i]; 3515 3516 hns3_add_ring_to_group(&tqp_vector->tx_group, 3517 &priv->ring[i]); 3518 3519 hns3_add_ring_to_group(&tqp_vector->rx_group, 3520 &priv->ring[i + tqp_num]); 3521 3522 priv->ring[i].tqp_vector = tqp_vector; 3523 priv->ring[i + tqp_num].tqp_vector = tqp_vector; 3524 tqp_vector->num_tqps++; 3525 } 3526 3527 for (i = 0; i < priv->vector_num; i++) { 3528 tqp_vector = &priv->tqp_vector[i]; 3529 3530 tqp_vector->rx_group.total_bytes = 0; 3531 tqp_vector->rx_group.total_packets = 0; 3532 tqp_vector->tx_group.total_bytes = 0; 3533 tqp_vector->tx_group.total_packets = 0; 3534 tqp_vector->handle = h; 3535 3536 ret = hns3_get_vector_ring_chain(tqp_vector, 3537 &vector_ring_chain); 3538 if (ret) 3539 goto map_ring_fail; 3540 3541 ret = h->ae_algo->ops->map_ring_to_vector(h, 3542 tqp_vector->vector_irq, &vector_ring_chain); 3543 3544 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3545 3546 if (ret) 3547 goto map_ring_fail; 3548 3549 netif_napi_add(priv->netdev, &tqp_vector->napi, 3550 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 3551 } 3552 3553 return 0; 3554 3555 map_ring_fail: 3556 while (i--) 3557 netif_napi_del(&priv->tqp_vector[i].napi); 3558 3559 return ret; 3560 } 3561 3562 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 3563 { 3564 #define HNS3_VECTOR_PF_MAX_NUM 64 3565 3566 struct hnae3_handle *h = priv->ae_handle; 3567 struct hns3_enet_tqp_vector *tqp_vector; 3568 struct hnae3_vector_info *vector; 3569 struct pci_dev *pdev = h->pdev; 3570 u16 tqp_num = h->kinfo.num_tqps; 3571 u16 vector_num; 3572 int ret = 0; 3573 u16 i; 3574 3575 /* RSS size, cpu online and vector_num should be the same */ 3576 /* Should consider 2p/4p later */ 3577 vector_num = min_t(u16, num_online_cpus(), tqp_num); 3578 vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM); 3579 3580 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), 3581 GFP_KERNEL); 3582 if (!vector) 3583 return -ENOMEM; 3584 3585 /* save the actual available vector number */ 3586 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 3587 3588 priv->vector_num = vector_num; 3589 priv->tqp_vector = (struct hns3_enet_tqp_vector *) 3590 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 3591 GFP_KERNEL); 3592 if (!priv->tqp_vector) { 3593 ret = -ENOMEM; 3594 goto out; 3595 } 3596 3597 for (i = 0; i < priv->vector_num; i++) { 3598 tqp_vector = &priv->tqp_vector[i]; 3599 tqp_vector->idx = i; 3600 tqp_vector->mask_addr = vector[i].io_addr; 3601 tqp_vector->vector_irq = vector[i].vector; 3602 hns3_vector_gl_rl_init(tqp_vector, priv); 3603 } 3604 3605 out: 3606 devm_kfree(&pdev->dev, vector); 3607 return ret; 3608 } 3609 3610 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) 3611 { 3612 group->ring = NULL; 3613 group->count = 0; 3614 } 3615 3616 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 3617 { 3618 struct hnae3_ring_chain_node vector_ring_chain; 3619 struct hnae3_handle *h = priv->ae_handle; 3620 struct hns3_enet_tqp_vector *tqp_vector; 3621 int i; 3622 3623 for (i = 0; i < priv->vector_num; i++) { 3624 tqp_vector = &priv->tqp_vector[i]; 3625 3626 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) 3627 continue; 3628 3629 /* Since the mapping can be overwritten, when fail to get the 3630 * chain between vector and ring, we should go on to deal with 3631 * the remaining options. 3632 */ 3633 if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) 3634 dev_warn(priv->dev, "failed to get ring chain\n"); 3635 3636 h->ae_algo->ops->unmap_ring_from_vector(h, 3637 tqp_vector->vector_irq, &vector_ring_chain); 3638 3639 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3640 3641 hns3_clear_ring_group(&tqp_vector->rx_group); 3642 hns3_clear_ring_group(&tqp_vector->tx_group); 3643 netif_napi_del(&priv->tqp_vector[i].napi); 3644 } 3645 } 3646 3647 static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 3648 { 3649 struct hnae3_handle *h = priv->ae_handle; 3650 struct pci_dev *pdev = h->pdev; 3651 int i, ret; 3652 3653 for (i = 0; i < priv->vector_num; i++) { 3654 struct hns3_enet_tqp_vector *tqp_vector; 3655 3656 tqp_vector = &priv->tqp_vector[i]; 3657 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 3658 if (ret) 3659 return; 3660 } 3661 3662 devm_kfree(&pdev->dev, priv->tqp_vector); 3663 } 3664 3665 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 3666 unsigned int ring_type) 3667 { 3668 int queue_num = priv->ae_handle->kinfo.num_tqps; 3669 struct hns3_enet_ring *ring; 3670 int desc_num; 3671 3672 if (ring_type == HNAE3_RING_TYPE_TX) { 3673 ring = &priv->ring[q->tqp_index]; 3674 desc_num = priv->ae_handle->kinfo.num_tx_desc; 3675 ring->queue_index = q->tqp_index; 3676 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; 3677 } else { 3678 ring = &priv->ring[q->tqp_index + queue_num]; 3679 desc_num = priv->ae_handle->kinfo.num_rx_desc; 3680 ring->queue_index = q->tqp_index; 3681 ring->io_base = q->io_base; 3682 } 3683 3684 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 3685 3686 ring->tqp = q; 3687 ring->desc = NULL; 3688 ring->desc_cb = NULL; 3689 ring->dev = priv->dev; 3690 ring->desc_dma_addr = 0; 3691 ring->buf_size = q->buf_size; 3692 ring->desc_num = desc_num; 3693 ring->next_to_use = 0; 3694 ring->next_to_clean = 0; 3695 } 3696 3697 static void hns3_queue_to_ring(struct hnae3_queue *tqp, 3698 struct hns3_nic_priv *priv) 3699 { 3700 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 3701 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 3702 } 3703 3704 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 3705 { 3706 struct hnae3_handle *h = priv->ae_handle; 3707 struct pci_dev *pdev = h->pdev; 3708 int i; 3709 3710 priv->ring = devm_kzalloc(&pdev->dev, 3711 array3_size(h->kinfo.num_tqps, 3712 sizeof(*priv->ring), 2), 3713 GFP_KERNEL); 3714 if (!priv->ring) 3715 return -ENOMEM; 3716 3717 for (i = 0; i < h->kinfo.num_tqps; i++) 3718 hns3_queue_to_ring(h->kinfo.tqp[i], priv); 3719 3720 return 0; 3721 } 3722 3723 static void hns3_put_ring_config(struct hns3_nic_priv *priv) 3724 { 3725 if (!priv->ring) 3726 return; 3727 3728 devm_kfree(priv->dev, priv->ring); 3729 priv->ring = NULL; 3730 } 3731 3732 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) 3733 { 3734 int ret; 3735 3736 if (ring->desc_num <= 0 || ring->buf_size <= 0) 3737 return -EINVAL; 3738 3739 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, 3740 sizeof(ring->desc_cb[0]), GFP_KERNEL); 3741 if (!ring->desc_cb) { 3742 ret = -ENOMEM; 3743 goto out; 3744 } 3745 3746 ret = hns3_alloc_desc(ring); 3747 if (ret) 3748 goto out_with_desc_cb; 3749 3750 if (!HNAE3_IS_TX_RING(ring)) { 3751 ret = hns3_alloc_ring_buffers(ring); 3752 if (ret) 3753 goto out_with_desc; 3754 } 3755 3756 return 0; 3757 3758 out_with_desc: 3759 hns3_free_desc(ring); 3760 out_with_desc_cb: 3761 devm_kfree(ring_to_dev(ring), ring->desc_cb); 3762 ring->desc_cb = NULL; 3763 out: 3764 return ret; 3765 } 3766 3767 void hns3_fini_ring(struct hns3_enet_ring *ring) 3768 { 3769 hns3_free_desc(ring); 3770 devm_kfree(ring_to_dev(ring), ring->desc_cb); 3771 ring->desc_cb = NULL; 3772 ring->next_to_clean = 0; 3773 ring->next_to_use = 0; 3774 ring->pending_buf = 0; 3775 if (ring->skb) { 3776 dev_kfree_skb_any(ring->skb); 3777 ring->skb = NULL; 3778 } 3779 } 3780 3781 static int hns3_buf_size2type(u32 buf_size) 3782 { 3783 int bd_size_type; 3784 3785 switch (buf_size) { 3786 case 512: 3787 bd_size_type = HNS3_BD_SIZE_512_TYPE; 3788 break; 3789 case 1024: 3790 bd_size_type = HNS3_BD_SIZE_1024_TYPE; 3791 break; 3792 case 2048: 3793 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 3794 break; 3795 case 4096: 3796 bd_size_type = HNS3_BD_SIZE_4096_TYPE; 3797 break; 3798 default: 3799 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 3800 } 3801 3802 return bd_size_type; 3803 } 3804 3805 static void hns3_init_ring_hw(struct hns3_enet_ring *ring) 3806 { 3807 dma_addr_t dma = ring->desc_dma_addr; 3808 struct hnae3_queue *q = ring->tqp; 3809 3810 if (!HNAE3_IS_TX_RING(ring)) { 3811 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma); 3812 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, 3813 (u32)((dma >> 31) >> 1)); 3814 3815 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, 3816 hns3_buf_size2type(ring->buf_size)); 3817 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, 3818 ring->desc_num / 8 - 1); 3819 3820 } else { 3821 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, 3822 (u32)dma); 3823 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 3824 (u32)((dma >> 31) >> 1)); 3825 3826 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 3827 ring->desc_num / 8 - 1); 3828 } 3829 } 3830 3831 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) 3832 { 3833 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 3834 int i; 3835 3836 for (i = 0; i < HNAE3_MAX_TC; i++) { 3837 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; 3838 int j; 3839 3840 if (!tc_info->enable) 3841 continue; 3842 3843 for (j = 0; j < tc_info->tqp_count; j++) { 3844 struct hnae3_queue *q; 3845 3846 q = priv->ring[tc_info->tqp_offset + j].tqp; 3847 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, 3848 tc_info->tc); 3849 } 3850 } 3851 } 3852 3853 int hns3_init_all_ring(struct hns3_nic_priv *priv) 3854 { 3855 struct hnae3_handle *h = priv->ae_handle; 3856 int ring_num = h->kinfo.num_tqps * 2; 3857 int i, j; 3858 int ret; 3859 3860 for (i = 0; i < ring_num; i++) { 3861 ret = hns3_alloc_ring_memory(&priv->ring[i]); 3862 if (ret) { 3863 dev_err(priv->dev, 3864 "Alloc ring memory fail! ret=%d\n", ret); 3865 goto out_when_alloc_ring_memory; 3866 } 3867 3868 u64_stats_init(&priv->ring[i].syncp); 3869 } 3870 3871 return 0; 3872 3873 out_when_alloc_ring_memory: 3874 for (j = i - 1; j >= 0; j--) 3875 hns3_fini_ring(&priv->ring[j]); 3876 3877 return -ENOMEM; 3878 } 3879 3880 int hns3_uninit_all_ring(struct hns3_nic_priv *priv) 3881 { 3882 struct hnae3_handle *h = priv->ae_handle; 3883 int i; 3884 3885 for (i = 0; i < h->kinfo.num_tqps; i++) { 3886 hns3_fini_ring(&priv->ring[i]); 3887 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); 3888 } 3889 return 0; 3890 } 3891 3892 /* Set mac addr if it is configured. or leave it to the AE driver */ 3893 static int hns3_init_mac_addr(struct net_device *netdev) 3894 { 3895 struct hns3_nic_priv *priv = netdev_priv(netdev); 3896 struct hnae3_handle *h = priv->ae_handle; 3897 u8 mac_addr_temp[ETH_ALEN]; 3898 int ret = 0; 3899 3900 if (h->ae_algo->ops->get_mac_addr) 3901 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 3902 3903 /* Check if the MAC address is valid, if not get a random one */ 3904 if (!is_valid_ether_addr(mac_addr_temp)) { 3905 eth_hw_addr_random(netdev); 3906 dev_warn(priv->dev, "using random MAC address %pM\n", 3907 netdev->dev_addr); 3908 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { 3909 ether_addr_copy(netdev->dev_addr, mac_addr_temp); 3910 ether_addr_copy(netdev->perm_addr, mac_addr_temp); 3911 } else { 3912 return 0; 3913 } 3914 3915 if (h->ae_algo->ops->set_mac_addr) 3916 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 3917 3918 return ret; 3919 } 3920 3921 static int hns3_init_phy(struct net_device *netdev) 3922 { 3923 struct hnae3_handle *h = hns3_get_handle(netdev); 3924 int ret = 0; 3925 3926 if (h->ae_algo->ops->mac_connect_phy) 3927 ret = h->ae_algo->ops->mac_connect_phy(h); 3928 3929 return ret; 3930 } 3931 3932 static void hns3_uninit_phy(struct net_device *netdev) 3933 { 3934 struct hnae3_handle *h = hns3_get_handle(netdev); 3935 3936 if (h->ae_algo->ops->mac_disconnect_phy) 3937 h->ae_algo->ops->mac_disconnect_phy(h); 3938 } 3939 3940 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) 3941 { 3942 struct hnae3_handle *h = hns3_get_handle(netdev); 3943 3944 if (h->ae_algo->ops->del_all_fd_entries) 3945 h->ae_algo->ops->del_all_fd_entries(h, clear_list); 3946 } 3947 3948 static int hns3_client_start(struct hnae3_handle *handle) 3949 { 3950 if (!handle->ae_algo->ops->client_start) 3951 return 0; 3952 3953 return handle->ae_algo->ops->client_start(handle); 3954 } 3955 3956 static void hns3_client_stop(struct hnae3_handle *handle) 3957 { 3958 if (!handle->ae_algo->ops->client_stop) 3959 return; 3960 3961 handle->ae_algo->ops->client_stop(handle); 3962 } 3963 3964 static void hns3_info_show(struct hns3_nic_priv *priv) 3965 { 3966 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 3967 3968 dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); 3969 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); 3970 dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); 3971 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); 3972 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); 3973 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); 3974 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); 3975 dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); 3976 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); 3977 } 3978 3979 static int hns3_client_init(struct hnae3_handle *handle) 3980 { 3981 struct pci_dev *pdev = handle->pdev; 3982 u16 alloc_tqps, max_rss_size; 3983 struct hns3_nic_priv *priv; 3984 struct net_device *netdev; 3985 int ret; 3986 3987 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, 3988 &max_rss_size); 3989 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); 3990 if (!netdev) 3991 return -ENOMEM; 3992 3993 priv = netdev_priv(netdev); 3994 priv->dev = &pdev->dev; 3995 priv->netdev = netdev; 3996 priv->ae_handle = handle; 3997 priv->tx_timeout_count = 0; 3998 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 3999 4000 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); 4001 4002 handle->kinfo.netdev = netdev; 4003 handle->priv = (void *)priv; 4004 4005 hns3_init_mac_addr(netdev); 4006 4007 hns3_set_default_feature(netdev); 4008 4009 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; 4010 netdev->priv_flags |= IFF_UNICAST_FLT; 4011 netdev->netdev_ops = &hns3_nic_netdev_ops; 4012 SET_NETDEV_DEV(netdev, &pdev->dev); 4013 hns3_ethtool_set_ops(netdev); 4014 4015 /* Carrier off reporting is important to ethtool even BEFORE open */ 4016 netif_carrier_off(netdev); 4017 4018 ret = hns3_get_ring_config(priv); 4019 if (ret) { 4020 ret = -ENOMEM; 4021 goto out_get_ring_cfg; 4022 } 4023 4024 ret = hns3_nic_alloc_vector_data(priv); 4025 if (ret) { 4026 ret = -ENOMEM; 4027 goto out_alloc_vector_data; 4028 } 4029 4030 ret = hns3_nic_init_vector_data(priv); 4031 if (ret) { 4032 ret = -ENOMEM; 4033 goto out_init_vector_data; 4034 } 4035 4036 ret = hns3_init_all_ring(priv); 4037 if (ret) { 4038 ret = -ENOMEM; 4039 goto out_init_ring; 4040 } 4041 4042 ret = hns3_init_phy(netdev); 4043 if (ret) 4044 goto out_init_phy; 4045 4046 ret = register_netdev(netdev); 4047 if (ret) { 4048 dev_err(priv->dev, "probe register netdev fail!\n"); 4049 goto out_reg_netdev_fail; 4050 } 4051 4052 /* the device can work without cpu rmap, only aRFS needs it */ 4053 ret = hns3_set_rx_cpu_rmap(netdev); 4054 if (ret) 4055 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 4056 4057 ret = hns3_nic_init_irq(priv); 4058 if (ret) { 4059 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); 4060 hns3_free_rx_cpu_rmap(netdev); 4061 goto out_init_irq_fail; 4062 } 4063 4064 ret = hns3_client_start(handle); 4065 if (ret) { 4066 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); 4067 goto out_client_start; 4068 } 4069 4070 hns3_dcbnl_setup(handle); 4071 4072 hns3_dbg_init(handle); 4073 4074 /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */ 4075 netdev->max_mtu = HNS3_MAX_MTU; 4076 4077 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 4078 4079 if (netif_msg_drv(handle)) 4080 hns3_info_show(priv); 4081 4082 return ret; 4083 4084 out_client_start: 4085 hns3_free_rx_cpu_rmap(netdev); 4086 hns3_nic_uninit_irq(priv); 4087 out_init_irq_fail: 4088 unregister_netdev(netdev); 4089 out_reg_netdev_fail: 4090 hns3_uninit_phy(netdev); 4091 out_init_phy: 4092 hns3_uninit_all_ring(priv); 4093 out_init_ring: 4094 hns3_nic_uninit_vector_data(priv); 4095 out_init_vector_data: 4096 hns3_nic_dealloc_vector_data(priv); 4097 out_alloc_vector_data: 4098 priv->ring = NULL; 4099 out_get_ring_cfg: 4100 priv->ae_handle = NULL; 4101 free_netdev(netdev); 4102 return ret; 4103 } 4104 4105 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) 4106 { 4107 struct net_device *netdev = handle->kinfo.netdev; 4108 struct hns3_nic_priv *priv = netdev_priv(netdev); 4109 int ret; 4110 4111 if (netdev->reg_state != NETREG_UNINITIALIZED) 4112 unregister_netdev(netdev); 4113 4114 hns3_client_stop(handle); 4115 4116 hns3_uninit_phy(netdev); 4117 4118 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 4119 netdev_warn(netdev, "already uninitialized\n"); 4120 goto out_netdev_free; 4121 } 4122 4123 hns3_free_rx_cpu_rmap(netdev); 4124 4125 hns3_nic_uninit_irq(priv); 4126 4127 hns3_del_all_fd_rules(netdev, true); 4128 4129 hns3_clear_all_ring(handle, true); 4130 4131 hns3_nic_uninit_vector_data(priv); 4132 4133 hns3_nic_dealloc_vector_data(priv); 4134 4135 ret = hns3_uninit_all_ring(priv); 4136 if (ret) 4137 netdev_err(netdev, "uninit ring error\n"); 4138 4139 hns3_put_ring_config(priv); 4140 4141 out_netdev_free: 4142 hns3_dbg_uninit(handle); 4143 free_netdev(netdev); 4144 } 4145 4146 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) 4147 { 4148 struct net_device *netdev = handle->kinfo.netdev; 4149 4150 if (!netdev) 4151 return; 4152 4153 if (linkup) { 4154 netif_tx_wake_all_queues(netdev); 4155 netif_carrier_on(netdev); 4156 if (netif_msg_link(handle)) 4157 netdev_info(netdev, "link up\n"); 4158 } else { 4159 netif_carrier_off(netdev); 4160 netif_tx_stop_all_queues(netdev); 4161 if (netif_msg_link(handle)) 4162 netdev_info(netdev, "link down\n"); 4163 } 4164 } 4165 4166 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) 4167 { 4168 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 4169 struct net_device *ndev = kinfo->netdev; 4170 4171 if (tc > HNAE3_MAX_TC) 4172 return -EINVAL; 4173 4174 if (!ndev) 4175 return -ENODEV; 4176 4177 return hns3_nic_set_real_num_queue(ndev); 4178 } 4179 4180 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) 4181 { 4182 while (ring->next_to_clean != ring->next_to_use) { 4183 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; 4184 hns3_free_buffer_detach(ring, ring->next_to_clean); 4185 ring_ptr_move_fw(ring, next_to_clean); 4186 } 4187 } 4188 4189 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) 4190 { 4191 struct hns3_desc_cb res_cbs; 4192 int ret; 4193 4194 while (ring->next_to_use != ring->next_to_clean) { 4195 /* When a buffer is not reused, it's memory has been 4196 * freed in hns3_handle_rx_bd or will be freed by 4197 * stack, so we need to replace the buffer here. 4198 */ 4199 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 4200 ret = hns3_alloc_and_map_buffer(ring, &res_cbs); 4201 if (ret) { 4202 u64_stats_update_begin(&ring->syncp); 4203 ring->stats.sw_err_cnt++; 4204 u64_stats_update_end(&ring->syncp); 4205 /* if alloc new buffer fail, exit directly 4206 * and reclear in up flow. 4207 */ 4208 netdev_warn(ring_to_netdev(ring), 4209 "reserve buffer map failed, ret = %d\n", 4210 ret); 4211 return ret; 4212 } 4213 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 4214 } 4215 ring_ptr_move_fw(ring, next_to_use); 4216 } 4217 4218 /* Free the pending skb in rx ring */ 4219 if (ring->skb) { 4220 dev_kfree_skb_any(ring->skb); 4221 ring->skb = NULL; 4222 ring->pending_buf = 0; 4223 } 4224 4225 return 0; 4226 } 4227 4228 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) 4229 { 4230 while (ring->next_to_use != ring->next_to_clean) { 4231 /* When a buffer is not reused, it's memory has been 4232 * freed in hns3_handle_rx_bd or will be freed by 4233 * stack, so only need to unmap the buffer here. 4234 */ 4235 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 4236 hns3_unmap_buffer(ring, 4237 &ring->desc_cb[ring->next_to_use]); 4238 ring->desc_cb[ring->next_to_use].dma = 0; 4239 } 4240 4241 ring_ptr_move_fw(ring, next_to_use); 4242 } 4243 } 4244 4245 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force) 4246 { 4247 struct net_device *ndev = h->kinfo.netdev; 4248 struct hns3_nic_priv *priv = netdev_priv(ndev); 4249 u32 i; 4250 4251 for (i = 0; i < h->kinfo.num_tqps; i++) { 4252 struct hns3_enet_ring *ring; 4253 4254 ring = &priv->ring[i]; 4255 hns3_clear_tx_ring(ring); 4256 4257 ring = &priv->ring[i + h->kinfo.num_tqps]; 4258 /* Continue to clear other rings even if clearing some 4259 * rings failed. 4260 */ 4261 if (force) 4262 hns3_force_clear_rx_ring(ring); 4263 else 4264 hns3_clear_rx_ring(ring); 4265 } 4266 } 4267 4268 int hns3_nic_reset_all_ring(struct hnae3_handle *h) 4269 { 4270 struct net_device *ndev = h->kinfo.netdev; 4271 struct hns3_nic_priv *priv = netdev_priv(ndev); 4272 struct hns3_enet_ring *rx_ring; 4273 int i, j; 4274 int ret; 4275 4276 for (i = 0; i < h->kinfo.num_tqps; i++) { 4277 ret = h->ae_algo->ops->reset_queue(h, i); 4278 if (ret) 4279 return ret; 4280 4281 hns3_init_ring_hw(&priv->ring[i]); 4282 4283 /* We need to clear tx ring here because self test will 4284 * use the ring and will not run down before up 4285 */ 4286 hns3_clear_tx_ring(&priv->ring[i]); 4287 priv->ring[i].next_to_clean = 0; 4288 priv->ring[i].next_to_use = 0; 4289 4290 rx_ring = &priv->ring[i + h->kinfo.num_tqps]; 4291 hns3_init_ring_hw(rx_ring); 4292 ret = hns3_clear_rx_ring(rx_ring); 4293 if (ret) 4294 return ret; 4295 4296 /* We can not know the hardware head and tail when this 4297 * function is called in reset flow, so we reuse all desc. 4298 */ 4299 for (j = 0; j < rx_ring->desc_num; j++) 4300 hns3_reuse_buffer(rx_ring, j); 4301 4302 rx_ring->next_to_clean = 0; 4303 rx_ring->next_to_use = 0; 4304 } 4305 4306 hns3_init_tx_ring_tc(priv); 4307 4308 return 0; 4309 } 4310 4311 static void hns3_store_coal(struct hns3_nic_priv *priv) 4312 { 4313 /* ethtool only support setting and querying one coal 4314 * configuration for now, so save the vector 0' coal 4315 * configuration here in order to restore it. 4316 */ 4317 memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, 4318 sizeof(struct hns3_enet_coalesce)); 4319 memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal, 4320 sizeof(struct hns3_enet_coalesce)); 4321 } 4322 4323 static void hns3_restore_coal(struct hns3_nic_priv *priv) 4324 { 4325 u16 vector_num = priv->vector_num; 4326 int i; 4327 4328 for (i = 0; i < vector_num; i++) { 4329 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal, 4330 sizeof(struct hns3_enet_coalesce)); 4331 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal, 4332 sizeof(struct hns3_enet_coalesce)); 4333 } 4334 } 4335 4336 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) 4337 { 4338 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 4339 struct net_device *ndev = kinfo->netdev; 4340 struct hns3_nic_priv *priv = netdev_priv(ndev); 4341 4342 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) 4343 return 0; 4344 4345 if (!netif_running(ndev)) 4346 return 0; 4347 4348 return hns3_nic_net_stop(ndev); 4349 } 4350 4351 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) 4352 { 4353 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 4354 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); 4355 int ret = 0; 4356 4357 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 4358 4359 if (netif_running(kinfo->netdev)) { 4360 ret = hns3_nic_net_open(kinfo->netdev); 4361 if (ret) { 4362 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 4363 netdev_err(kinfo->netdev, 4364 "net up fail, ret=%d!\n", ret); 4365 return ret; 4366 } 4367 } 4368 4369 return ret; 4370 } 4371 4372 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) 4373 { 4374 struct net_device *netdev = handle->kinfo.netdev; 4375 struct hns3_nic_priv *priv = netdev_priv(netdev); 4376 int ret; 4377 4378 /* Carrier off reporting is important to ethtool even BEFORE open */ 4379 netif_carrier_off(netdev); 4380 4381 ret = hns3_get_ring_config(priv); 4382 if (ret) 4383 return ret; 4384 4385 ret = hns3_nic_alloc_vector_data(priv); 4386 if (ret) 4387 goto err_put_ring; 4388 4389 hns3_restore_coal(priv); 4390 4391 ret = hns3_nic_init_vector_data(priv); 4392 if (ret) 4393 goto err_dealloc_vector; 4394 4395 ret = hns3_init_all_ring(priv); 4396 if (ret) 4397 goto err_uninit_vector; 4398 4399 /* the device can work without cpu rmap, only aRFS needs it */ 4400 ret = hns3_set_rx_cpu_rmap(netdev); 4401 if (ret) 4402 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 4403 4404 ret = hns3_nic_init_irq(priv); 4405 if (ret) { 4406 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); 4407 hns3_free_rx_cpu_rmap(netdev); 4408 goto err_init_irq_fail; 4409 } 4410 4411 if (!hns3_is_phys_func(handle->pdev)) 4412 hns3_init_mac_addr(netdev); 4413 4414 ret = hns3_client_start(handle); 4415 if (ret) { 4416 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); 4417 goto err_client_start_fail; 4418 } 4419 4420 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 4421 4422 return ret; 4423 4424 err_client_start_fail: 4425 hns3_free_rx_cpu_rmap(netdev); 4426 hns3_nic_uninit_irq(priv); 4427 err_init_irq_fail: 4428 hns3_uninit_all_ring(priv); 4429 err_uninit_vector: 4430 hns3_nic_uninit_vector_data(priv); 4431 err_dealloc_vector: 4432 hns3_nic_dealloc_vector_data(priv); 4433 err_put_ring: 4434 hns3_put_ring_config(priv); 4435 4436 return ret; 4437 } 4438 4439 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) 4440 { 4441 struct net_device *netdev = handle->kinfo.netdev; 4442 struct hns3_nic_priv *priv = netdev_priv(netdev); 4443 int ret; 4444 4445 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 4446 netdev_warn(netdev, "already uninitialized\n"); 4447 return 0; 4448 } 4449 4450 hns3_free_rx_cpu_rmap(netdev); 4451 hns3_nic_uninit_irq(priv); 4452 hns3_clear_all_ring(handle, true); 4453 hns3_reset_tx_queue(priv->ae_handle); 4454 4455 hns3_nic_uninit_vector_data(priv); 4456 4457 hns3_store_coal(priv); 4458 4459 hns3_nic_dealloc_vector_data(priv); 4460 4461 ret = hns3_uninit_all_ring(priv); 4462 if (ret) 4463 netdev_err(netdev, "uninit ring error\n"); 4464 4465 hns3_put_ring_config(priv); 4466 4467 return ret; 4468 } 4469 4470 static int hns3_reset_notify(struct hnae3_handle *handle, 4471 enum hnae3_reset_notify_type type) 4472 { 4473 int ret = 0; 4474 4475 switch (type) { 4476 case HNAE3_UP_CLIENT: 4477 ret = hns3_reset_notify_up_enet(handle); 4478 break; 4479 case HNAE3_DOWN_CLIENT: 4480 ret = hns3_reset_notify_down_enet(handle); 4481 break; 4482 case HNAE3_INIT_CLIENT: 4483 ret = hns3_reset_notify_init_enet(handle); 4484 break; 4485 case HNAE3_UNINIT_CLIENT: 4486 ret = hns3_reset_notify_uninit_enet(handle); 4487 break; 4488 default: 4489 break; 4490 } 4491 4492 return ret; 4493 } 4494 4495 static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num, 4496 bool rxfh_configured) 4497 { 4498 int ret; 4499 4500 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, 4501 rxfh_configured); 4502 if (ret) { 4503 dev_err(&handle->pdev->dev, 4504 "Change tqp num(%u) fail.\n", new_tqp_num); 4505 return ret; 4506 } 4507 4508 ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT); 4509 if (ret) 4510 return ret; 4511 4512 ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT); 4513 if (ret) 4514 hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT); 4515 4516 return ret; 4517 } 4518 4519 int hns3_set_channels(struct net_device *netdev, 4520 struct ethtool_channels *ch) 4521 { 4522 struct hnae3_handle *h = hns3_get_handle(netdev); 4523 struct hnae3_knic_private_info *kinfo = &h->kinfo; 4524 bool rxfh_configured = netif_is_rxfh_configured(netdev); 4525 u32 new_tqp_num = ch->combined_count; 4526 u16 org_tqp_num; 4527 int ret; 4528 4529 if (hns3_nic_resetting(netdev)) 4530 return -EBUSY; 4531 4532 if (ch->rx_count || ch->tx_count) 4533 return -EINVAL; 4534 4535 if (new_tqp_num > hns3_get_max_available_channels(h) || 4536 new_tqp_num < 1) { 4537 dev_err(&netdev->dev, 4538 "Change tqps fail, the tqp range is from 1 to %u", 4539 hns3_get_max_available_channels(h)); 4540 return -EINVAL; 4541 } 4542 4543 if (kinfo->rss_size == new_tqp_num) 4544 return 0; 4545 4546 netif_dbg(h, drv, netdev, 4547 "set channels: tqp_num=%u, rxfh=%d\n", 4548 new_tqp_num, rxfh_configured); 4549 4550 ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); 4551 if (ret) 4552 return ret; 4553 4554 ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); 4555 if (ret) 4556 return ret; 4557 4558 org_tqp_num = h->kinfo.num_tqps; 4559 ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); 4560 if (ret) { 4561 int ret1; 4562 4563 netdev_warn(netdev, 4564 "Change channels fail, revert to old value\n"); 4565 ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); 4566 if (ret1) { 4567 netdev_err(netdev, 4568 "revert to old channel fail\n"); 4569 return ret1; 4570 } 4571 4572 return ret; 4573 } 4574 4575 return 0; 4576 } 4577 4578 static const struct hns3_hw_error_info hns3_hw_err[] = { 4579 { .type = HNAE3_PPU_POISON_ERROR, 4580 .msg = "PPU poison" }, 4581 { .type = HNAE3_CMDQ_ECC_ERROR, 4582 .msg = "IMP CMDQ error" }, 4583 { .type = HNAE3_IMP_RD_POISON_ERROR, 4584 .msg = "IMP RD poison" }, 4585 }; 4586 4587 static void hns3_process_hw_error(struct hnae3_handle *handle, 4588 enum hnae3_hw_error_type type) 4589 { 4590 int i; 4591 4592 for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { 4593 if (hns3_hw_err[i].type == type) { 4594 dev_err(&handle->pdev->dev, "Detected %s!\n", 4595 hns3_hw_err[i].msg); 4596 break; 4597 } 4598 } 4599 } 4600 4601 static const struct hnae3_client_ops client_ops = { 4602 .init_instance = hns3_client_init, 4603 .uninit_instance = hns3_client_uninit, 4604 .link_status_change = hns3_link_status_change, 4605 .setup_tc = hns3_client_setup_tc, 4606 .reset_notify = hns3_reset_notify, 4607 .process_hw_error = hns3_process_hw_error, 4608 }; 4609 4610 /* hns3_init_module - Driver registration routine 4611 * hns3_init_module is the first routine called when the driver is 4612 * loaded. All it does is register with the PCI subsystem. 4613 */ 4614 static int __init hns3_init_module(void) 4615 { 4616 int ret; 4617 4618 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); 4619 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); 4620 4621 client.type = HNAE3_CLIENT_KNIC; 4622 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", 4623 hns3_driver_name); 4624 4625 client.ops = &client_ops; 4626 4627 INIT_LIST_HEAD(&client.node); 4628 4629 hns3_dbg_register_debugfs(hns3_driver_name); 4630 4631 ret = hnae3_register_client(&client); 4632 if (ret) 4633 goto err_reg_client; 4634 4635 ret = pci_register_driver(&hns3_driver); 4636 if (ret) 4637 goto err_reg_driver; 4638 4639 return ret; 4640 4641 err_reg_driver: 4642 hnae3_unregister_client(&client); 4643 err_reg_client: 4644 hns3_dbg_unregister_debugfs(); 4645 return ret; 4646 } 4647 module_init(hns3_init_module); 4648 4649 /* hns3_exit_module - Driver exit cleanup routine 4650 * hns3_exit_module is called just before the driver is removed 4651 * from memory. 4652 */ 4653 static void __exit hns3_exit_module(void) 4654 { 4655 pci_unregister_driver(&hns3_driver); 4656 hnae3_unregister_client(&client); 4657 hns3_dbg_unregister_debugfs(); 4658 } 4659 module_exit(hns3_exit_module); 4660 4661 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); 4662 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 4663 MODULE_LICENSE("GPL"); 4664 MODULE_ALIAS("pci:hns-nic"); 4665