1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/dma-mapping.h> 5 #include <linux/etherdevice.h> 6 #include <linux/interrupt.h> 7 #ifdef CONFIG_RFS_ACCEL 8 #include <linux/cpu_rmap.h> 9 #endif 10 #include <linux/if_vlan.h> 11 #include <linux/irq.h> 12 #include <linux/ip.h> 13 #include <linux/ipv6.h> 14 #include <linux/module.h> 15 #include <linux/pci.h> 16 #include <linux/aer.h> 17 #include <linux/skbuff.h> 18 #include <linux/sctp.h> 19 #include <net/gre.h> 20 #include <net/ip6_checksum.h> 21 #include <net/pkt_cls.h> 22 #include <net/tcp.h> 23 #include <net/vxlan.h> 24 #include <net/geneve.h> 25 26 #include "hnae3.h" 27 #include "hns3_enet.h" 28 /* All hns3 tracepoints are defined by the include below, which 29 * must be included exactly once across the whole kernel with 30 * CREATE_TRACE_POINTS defined 31 */ 32 #define CREATE_TRACE_POINTS 33 #include "hns3_trace.h" 34 35 #define hns3_set_field(origin, shift, val) ((origin) |= (val) << (shift)) 36 #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) 37 38 #define hns3_rl_err(fmt, ...) \ 39 do { \ 40 if (net_ratelimit()) \ 41 netdev_err(fmt, ##__VA_ARGS__); \ 42 } while (0) 43 44 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); 45 46 static const char hns3_driver_name[] = "hns3"; 47 static const char hns3_driver_string[] = 48 "Hisilicon Ethernet Network Driver for Hip08 Family"; 49 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; 50 static struct hnae3_client client; 51 52 static int debug = -1; 53 module_param(debug, int, 0); 54 MODULE_PARM_DESC(debug, " Network interface message level setting"); 55 56 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 57 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) 58 59 #define HNS3_INNER_VLAN_TAG 1 60 #define HNS3_OUTER_VLAN_TAG 2 61 62 #define HNS3_MIN_TX_LEN 33U 63 64 /* hns3_pci_tbl - PCI Device ID Table 65 * 66 * Last entry must be all 0s 67 * 68 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 69 * Class, Class Mask, private data (not used) } 70 */ 71 static const struct pci_device_id hns3_pci_tbl[] = { 72 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 73 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 74 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 75 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 76 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 77 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 79 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 81 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 83 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 85 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 88 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 89 /* required last entry */ 90 {0, } 91 }; 92 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); 93 94 static irqreturn_t hns3_irq_handle(int irq, void *vector) 95 { 96 struct hns3_enet_tqp_vector *tqp_vector = vector; 97 98 napi_schedule_irqoff(&tqp_vector->napi); 99 100 return IRQ_HANDLED; 101 } 102 103 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) 104 { 105 struct hns3_enet_tqp_vector *tqp_vectors; 106 unsigned int i; 107 108 for (i = 0; i < priv->vector_num; i++) { 109 tqp_vectors = &priv->tqp_vector[i]; 110 111 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) 112 continue; 113 114 /* clear the affinity mask 
 */
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "TxRx", txrx_int_idx++);
			txrx_int_idx++;
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Rx", rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Tx", tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			hns3_nic_uninit_irq(priv);
			return ret;
		}

		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);
	enable_irq(tqp_vector->vector_irq);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* This defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second. GL and RL are two ways to achieve interrupt coalescing.
	 */

	if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
	    !tqp_vector->rx_group.coal.adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->rx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->tx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
}

void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
}

static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;

	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 * 3. QL (Interrupt Quantity Limiter)
	 *
	 * Default: enable interrupt coalescing self-adaptive and GL
	 */
	tx_coal->adapt_enable = 1;
	rx_coal->adapt_enable = 1;

	tx_coal->int_gl = HNS3_INT_GL_50K;
	rx_coal->int_gl = HNS3_INT_GL_50K;

	rx_coal->flow_level = HNS3_FLOW_LOW;
	tx_coal->flow_level = HNS3_FLOW_LOW;

	/* For device version V3 and above, GL can be configured in 1us
	 * units, so use the 1us unit.
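	 * QL is only enabled further below when the firmware reports a
	 * non-zero dev_specs.int_ql_max for this device.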
287 */ 288 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { 289 tx_coal->unit_1us = 1; 290 rx_coal->unit_1us = 1; 291 } 292 293 if (ae_dev->dev_specs.int_ql_max) { 294 tx_coal->ql_enable = 1; 295 rx_coal->ql_enable = 1; 296 tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; 297 rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; 298 tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; 299 rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; 300 } 301 } 302 303 static void 304 hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector, 305 struct hns3_nic_priv *priv) 306 { 307 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; 308 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; 309 struct hnae3_handle *h = priv->ae_handle; 310 311 hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); 312 hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); 313 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); 314 315 if (tx_coal->ql_enable) 316 hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); 317 318 if (rx_coal->ql_enable) 319 hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); 320 } 321 322 static int hns3_nic_set_real_num_queue(struct net_device *netdev) 323 { 324 struct hnae3_handle *h = hns3_get_handle(netdev); 325 struct hnae3_knic_private_info *kinfo = &h->kinfo; 326 struct hnae3_tc_info *tc_info = &kinfo->tc_info; 327 unsigned int queue_size = kinfo->num_tqps; 328 int i, ret; 329 330 if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { 331 netdev_reset_tc(netdev); 332 } else { 333 ret = netdev_set_num_tc(netdev, tc_info->num_tc); 334 if (ret) { 335 netdev_err(netdev, 336 "netdev_set_num_tc fail, ret=%d!\n", ret); 337 return ret; 338 } 339 340 for (i = 0; i < HNAE3_MAX_TC; i++) { 341 if (!test_bit(i, &tc_info->tc_en)) 342 continue; 343 344 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], 345 tc_info->tqp_offset[i]); 346 } 347 } 348 349 ret = netif_set_real_num_tx_queues(netdev, queue_size); 350 if (ret) { 351 netdev_err(netdev, 352 "netif_set_real_num_tx_queues fail, ret=%d!\n", ret); 353 return ret; 354 } 355 356 ret = netif_set_real_num_rx_queues(netdev, queue_size); 357 if (ret) { 358 netdev_err(netdev, 359 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); 360 return ret; 361 } 362 363 return 0; 364 } 365 366 static u16 hns3_get_max_available_channels(struct hnae3_handle *h) 367 { 368 u16 alloc_tqps, max_rss_size, rss_size; 369 370 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); 371 rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; 372 373 return min_t(u16, rss_size, max_rss_size); 374 } 375 376 static void hns3_tqp_enable(struct hnae3_queue *tqp) 377 { 378 u32 rcb_reg; 379 380 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 381 rcb_reg |= BIT(HNS3_RING_EN_B); 382 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 383 } 384 385 static void hns3_tqp_disable(struct hnae3_queue *tqp) 386 { 387 u32 rcb_reg; 388 389 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 390 rcb_reg &= ~BIT(HNS3_RING_EN_B); 391 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 392 } 393 394 static void hns3_free_rx_cpu_rmap(struct net_device *netdev) 395 { 396 #ifdef CONFIG_RFS_ACCEL 397 free_irq_cpu_rmap(netdev->rx_cpu_rmap); 398 netdev->rx_cpu_rmap = NULL; 399 #endif 400 } 401 402 static int hns3_set_rx_cpu_rmap(struct net_device *netdev) 403 { 404 #ifdef CONFIG_RFS_ACCEL 405 struct hns3_nic_priv *priv = netdev_priv(netdev); 406 struct hns3_enet_tqp_vector *tqp_vector; 407 int i, ret; 408 409 if (!netdev->rx_cpu_rmap) { 
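		/* Allocate one aRFS reverse-map slot per TQP vector; the map
		 * is released again in hns3_free_rx_cpu_rmap().
		 */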
410 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); 411 if (!netdev->rx_cpu_rmap) 412 return -ENOMEM; 413 } 414 415 for (i = 0; i < priv->vector_num; i++) { 416 tqp_vector = &priv->tqp_vector[i]; 417 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, 418 tqp_vector->vector_irq); 419 if (ret) { 420 hns3_free_rx_cpu_rmap(netdev); 421 return ret; 422 } 423 } 424 #endif 425 return 0; 426 } 427 428 static int hns3_nic_net_up(struct net_device *netdev) 429 { 430 struct hns3_nic_priv *priv = netdev_priv(netdev); 431 struct hnae3_handle *h = priv->ae_handle; 432 int i, j; 433 int ret; 434 435 ret = hns3_nic_reset_all_ring(h); 436 if (ret) 437 return ret; 438 439 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 440 441 /* enable the vectors */ 442 for (i = 0; i < priv->vector_num; i++) 443 hns3_vector_enable(&priv->tqp_vector[i]); 444 445 /* enable rcb */ 446 for (j = 0; j < h->kinfo.num_tqps; j++) 447 hns3_tqp_enable(h->kinfo.tqp[j]); 448 449 /* start the ae_dev */ 450 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; 451 if (ret) { 452 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 453 while (j--) 454 hns3_tqp_disable(h->kinfo.tqp[j]); 455 456 for (j = i - 1; j >= 0; j--) 457 hns3_vector_disable(&priv->tqp_vector[j]); 458 } 459 460 return ret; 461 } 462 463 static void hns3_config_xps(struct hns3_nic_priv *priv) 464 { 465 int i; 466 467 for (i = 0; i < priv->vector_num; i++) { 468 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; 469 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; 470 471 while (ring) { 472 int ret; 473 474 ret = netif_set_xps_queue(priv->netdev, 475 &tqp_vector->affinity_mask, 476 ring->tqp->tqp_index); 477 if (ret) 478 netdev_warn(priv->netdev, 479 "set xps queue failed: %d", ret); 480 481 ring = ring->next; 482 } 483 } 484 } 485 486 static int hns3_nic_net_open(struct net_device *netdev) 487 { 488 struct hns3_nic_priv *priv = netdev_priv(netdev); 489 struct hnae3_handle *h = hns3_get_handle(netdev); 490 struct hnae3_knic_private_info *kinfo; 491 int i, ret; 492 493 if (hns3_nic_resetting(netdev)) 494 return -EBUSY; 495 496 netif_carrier_off(netdev); 497 498 ret = hns3_nic_set_real_num_queue(netdev); 499 if (ret) 500 return ret; 501 502 ret = hns3_nic_net_up(netdev); 503 if (ret) { 504 netdev_err(netdev, "net up fail, ret=%d!\n", ret); 505 return ret; 506 } 507 508 kinfo = &h->kinfo; 509 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 510 netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); 511 512 if (h->ae_algo->ops->set_timer_task) 513 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); 514 515 hns3_config_xps(priv); 516 517 netif_dbg(h, drv, netdev, "net open\n"); 518 519 return 0; 520 } 521 522 static void hns3_reset_tx_queue(struct hnae3_handle *h) 523 { 524 struct net_device *ndev = h->kinfo.netdev; 525 struct hns3_nic_priv *priv = netdev_priv(ndev); 526 struct netdev_queue *dev_queue; 527 u32 i; 528 529 for (i = 0; i < h->kinfo.num_tqps; i++) { 530 dev_queue = netdev_get_tx_queue(ndev, 531 priv->ring[i].queue_index); 532 netdev_tx_reset_queue(dev_queue); 533 } 534 } 535 536 static void hns3_nic_net_down(struct net_device *netdev) 537 { 538 struct hns3_nic_priv *priv = netdev_priv(netdev); 539 struct hnae3_handle *h = hns3_get_handle(netdev); 540 const struct hnae3_ae_ops *ops; 541 int i; 542 543 /* disable vectors */ 544 for (i = 0; i < priv->vector_num; i++) 545 hns3_vector_disable(&priv->tqp_vector[i]); 546 547 /* disable rcb */ 548 for (i = 0; i < h->kinfo.num_tqps; i++) 549 hns3_tqp_disable(h->kinfo.tqp[i]); 550 551 /* stop 
ae_dev */ 552 ops = priv->ae_handle->ae_algo->ops; 553 if (ops->stop) 554 ops->stop(priv->ae_handle); 555 556 /* delay ring buffer clearing to hns3_reset_notify_uninit_enet 557 * during reset process, because driver may not be able 558 * to disable the ring through firmware when downing the netdev. 559 */ 560 if (!hns3_nic_resetting(netdev)) 561 hns3_clear_all_ring(priv->ae_handle, false); 562 563 hns3_reset_tx_queue(priv->ae_handle); 564 } 565 566 static int hns3_nic_net_stop(struct net_device *netdev) 567 { 568 struct hns3_nic_priv *priv = netdev_priv(netdev); 569 struct hnae3_handle *h = hns3_get_handle(netdev); 570 571 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 572 return 0; 573 574 netif_dbg(h, drv, netdev, "net stop\n"); 575 576 if (h->ae_algo->ops->set_timer_task) 577 h->ae_algo->ops->set_timer_task(priv->ae_handle, false); 578 579 netif_tx_stop_all_queues(netdev); 580 netif_carrier_off(netdev); 581 582 hns3_nic_net_down(netdev); 583 584 return 0; 585 } 586 587 static int hns3_nic_uc_sync(struct net_device *netdev, 588 const unsigned char *addr) 589 { 590 struct hnae3_handle *h = hns3_get_handle(netdev); 591 592 if (h->ae_algo->ops->add_uc_addr) 593 return h->ae_algo->ops->add_uc_addr(h, addr); 594 595 return 0; 596 } 597 598 static int hns3_nic_uc_unsync(struct net_device *netdev, 599 const unsigned char *addr) 600 { 601 struct hnae3_handle *h = hns3_get_handle(netdev); 602 603 /* need ignore the request of removing device address, because 604 * we store the device address and other addresses of uc list 605 * in the function's mac filter list. 606 */ 607 if (ether_addr_equal(addr, netdev->dev_addr)) 608 return 0; 609 610 if (h->ae_algo->ops->rm_uc_addr) 611 return h->ae_algo->ops->rm_uc_addr(h, addr); 612 613 return 0; 614 } 615 616 static int hns3_nic_mc_sync(struct net_device *netdev, 617 const unsigned char *addr) 618 { 619 struct hnae3_handle *h = hns3_get_handle(netdev); 620 621 if (h->ae_algo->ops->add_mc_addr) 622 return h->ae_algo->ops->add_mc_addr(h, addr); 623 624 return 0; 625 } 626 627 static int hns3_nic_mc_unsync(struct net_device *netdev, 628 const unsigned char *addr) 629 { 630 struct hnae3_handle *h = hns3_get_handle(netdev); 631 632 if (h->ae_algo->ops->rm_mc_addr) 633 return h->ae_algo->ops->rm_mc_addr(h, addr); 634 635 return 0; 636 } 637 638 static u8 hns3_get_netdev_flags(struct net_device *netdev) 639 { 640 u8 flags = 0; 641 642 if (netdev->flags & IFF_PROMISC) { 643 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; 644 } else { 645 flags |= HNAE3_VLAN_FLTR; 646 if (netdev->flags & IFF_ALLMULTI) 647 flags |= HNAE3_USER_MPE; 648 } 649 650 return flags; 651 } 652 653 static void hns3_nic_set_rx_mode(struct net_device *netdev) 654 { 655 struct hnae3_handle *h = hns3_get_handle(netdev); 656 u8 new_flags; 657 658 new_flags = hns3_get_netdev_flags(netdev); 659 660 __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); 661 __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync); 662 663 /* User mode Promisc mode enable and vlan filtering is disabled to 664 * let all packets in. 
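	 * The flags computed by hns3_get_netdev_flags() are latched in
	 * h->netdev_flags below; the actual hardware update is requested
	 * through hns3_request_update_promisc_mode().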
665 */ 666 h->netdev_flags = new_flags; 667 hns3_request_update_promisc_mode(h); 668 } 669 670 void hns3_request_update_promisc_mode(struct hnae3_handle *handle) 671 { 672 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; 673 674 if (ops->request_update_promisc_mode) 675 ops->request_update_promisc_mode(handle); 676 } 677 678 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) 679 { 680 struct hns3_nic_priv *priv = netdev_priv(netdev); 681 struct hnae3_handle *h = priv->ae_handle; 682 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); 683 bool last_state; 684 685 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 && 686 h->ae_algo->ops->enable_vlan_filter) { 687 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false; 688 if (enable != last_state) { 689 netdev_info(netdev, 690 "%s vlan filter\n", 691 enable ? "enable" : "disable"); 692 h->ae_algo->ops->enable_vlan_filter(h, enable); 693 } 694 } 695 } 696 697 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs, 698 u16 *mss, u32 *type_cs_vlan_tso) 699 { 700 u32 l4_offset, hdr_len; 701 union l3_hdr_info l3; 702 union l4_hdr_info l4; 703 u32 l4_paylen; 704 int ret; 705 706 if (!skb_is_gso(skb)) 707 return 0; 708 709 ret = skb_cow_head(skb, 0); 710 if (unlikely(ret < 0)) 711 return ret; 712 713 l3.hdr = skb_network_header(skb); 714 l4.hdr = skb_transport_header(skb); 715 716 /* Software should clear the IPv4's checksum field when tso is 717 * needed. 718 */ 719 if (l3.v4->version == 4) 720 l3.v4->check = 0; 721 722 /* tunnel packet */ 723 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 724 SKB_GSO_GRE_CSUM | 725 SKB_GSO_UDP_TUNNEL | 726 SKB_GSO_UDP_TUNNEL_CSUM)) { 727 /* reset l3&l4 pointers from outer to inner headers */ 728 l3.hdr = skb_inner_network_header(skb); 729 l4.hdr = skb_inner_transport_header(skb); 730 731 /* Software should clear the IPv4's checksum field when 732 * tso is needed. 
733 */ 734 if (l3.v4->version == 4) 735 l3.v4->check = 0; 736 } 737 738 /* normal or tunnel packet */ 739 l4_offset = l4.hdr - skb->data; 740 741 /* remove payload length from inner pseudo checksum when tso */ 742 l4_paylen = skb->len - l4_offset; 743 744 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 745 hdr_len = sizeof(*l4.udp) + l4_offset; 746 csum_replace_by_diff(&l4.udp->check, 747 (__force __wsum)htonl(l4_paylen)); 748 } else { 749 hdr_len = (l4.tcp->doff << 2) + l4_offset; 750 csum_replace_by_diff(&l4.tcp->check, 751 (__force __wsum)htonl(l4_paylen)); 752 } 753 754 /* find the txbd field values */ 755 *paylen_fdop_ol4cs = skb->len - hdr_len; 756 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); 757 758 /* offload outer UDP header checksum */ 759 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) 760 hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1); 761 762 /* get MSS for TSO */ 763 *mss = skb_shinfo(skb)->gso_size; 764 765 trace_hns3_tso(skb); 766 767 return 0; 768 } 769 770 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, 771 u8 *il4_proto) 772 { 773 union l3_hdr_info l3; 774 unsigned char *l4_hdr; 775 unsigned char *exthdr; 776 u8 l4_proto_tmp; 777 __be16 frag_off; 778 779 /* find outer header point */ 780 l3.hdr = skb_network_header(skb); 781 l4_hdr = skb_transport_header(skb); 782 783 if (skb->protocol == htons(ETH_P_IPV6)) { 784 exthdr = l3.hdr + sizeof(*l3.v6); 785 l4_proto_tmp = l3.v6->nexthdr; 786 if (l4_hdr != exthdr) 787 ipv6_skip_exthdr(skb, exthdr - skb->data, 788 &l4_proto_tmp, &frag_off); 789 } else if (skb->protocol == htons(ETH_P_IP)) { 790 l4_proto_tmp = l3.v4->protocol; 791 } else { 792 return -EINVAL; 793 } 794 795 *ol4_proto = l4_proto_tmp; 796 797 /* tunnel packet */ 798 if (!skb->encapsulation) { 799 *il4_proto = 0; 800 return 0; 801 } 802 803 /* find inner header point */ 804 l3.hdr = skb_inner_network_header(skb); 805 l4_hdr = skb_inner_transport_header(skb); 806 807 if (l3.v6->version == 6) { 808 exthdr = l3.hdr + sizeof(*l3.v6); 809 l4_proto_tmp = l3.v6->nexthdr; 810 if (l4_hdr != exthdr) 811 ipv6_skip_exthdr(skb, exthdr - skb->data, 812 &l4_proto_tmp, &frag_off); 813 } else if (l3.v4->version == 4) { 814 l4_proto_tmp = l3.v4->protocol; 815 } 816 817 *il4_proto = l4_proto_tmp; 818 819 return 0; 820 } 821 822 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL 823 * and it is udp packet, which has a dest port as the IANA assigned. 824 * the hardware is expected to do the checksum offload, but the 825 * hardware will not do the checksum offload when udp dest port is 826 * 4789 or 6081. 827 */ 828 static bool hns3_tunnel_csum_bug(struct sk_buff *skb) 829 { 830 struct hns3_nic_priv *priv = netdev_priv(skb->dev); 831 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); 832 union l4_hdr_info l4; 833 834 /* device version above V3(include V3), the hardware can 835 * do this checksum offload. 
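	 * For older versions the checksum is computed in software via
	 * skb_checksum_help() when the outer UDP destination port is the
	 * VXLAN (4789) or GENEVE (6081) well-known port.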
836 */ 837 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 838 return false; 839 840 l4.hdr = skb_transport_header(skb); 841 842 if (!(!skb->encapsulation && 843 (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || 844 l4.udp->dest == htons(GENEVE_UDP_PORT)))) 845 return false; 846 847 skb_checksum_help(skb); 848 849 return true; 850 } 851 852 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 853 u32 *ol_type_vlan_len_msec) 854 { 855 u32 l2_len, l3_len, l4_len; 856 unsigned char *il2_hdr; 857 union l3_hdr_info l3; 858 union l4_hdr_info l4; 859 860 l3.hdr = skb_network_header(skb); 861 l4.hdr = skb_transport_header(skb); 862 863 /* compute OL2 header size, defined in 2 Bytes */ 864 l2_len = l3.hdr - skb->data; 865 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); 866 867 /* compute OL3 header size, defined in 4 Bytes */ 868 l3_len = l4.hdr - l3.hdr; 869 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); 870 871 il2_hdr = skb_inner_mac_header(skb); 872 /* compute OL4 header size, defined in 4 Bytes */ 873 l4_len = il2_hdr - l4.hdr; 874 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); 875 876 /* define outer network header type */ 877 if (skb->protocol == htons(ETH_P_IP)) { 878 if (skb_is_gso(skb)) 879 hns3_set_field(*ol_type_vlan_len_msec, 880 HNS3_TXD_OL3T_S, 881 HNS3_OL3T_IPV4_CSUM); 882 else 883 hns3_set_field(*ol_type_vlan_len_msec, 884 HNS3_TXD_OL3T_S, 885 HNS3_OL3T_IPV4_NO_CSUM); 886 887 } else if (skb->protocol == htons(ETH_P_IPV6)) { 888 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, 889 HNS3_OL3T_IPV6); 890 } 891 892 if (ol4_proto == IPPROTO_UDP) 893 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 894 HNS3_TUN_MAC_IN_UDP); 895 else if (ol4_proto == IPPROTO_GRE) 896 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 897 HNS3_TUN_NVGRE); 898 } 899 900 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 901 u8 il4_proto, u32 *type_cs_vlan_tso, 902 u32 *ol_type_vlan_len_msec) 903 { 904 unsigned char *l2_hdr = skb->data; 905 u32 l4_proto = ol4_proto; 906 union l4_hdr_info l4; 907 union l3_hdr_info l3; 908 u32 l2_len, l3_len; 909 910 l4.hdr = skb_transport_header(skb); 911 l3.hdr = skb_network_header(skb); 912 913 /* handle encapsulation skb */ 914 if (skb->encapsulation) { 915 /* If this is a not UDP/GRE encapsulation skb */ 916 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { 917 /* drop the skb tunnel packet if hardware don't support, 918 * because hardware can't calculate csum when TSO. 919 */ 920 if (skb_is_gso(skb)) 921 return -EDOM; 922 923 /* the stack computes the IP header already, 924 * driver calculate l4 checksum when not TSO. 925 */ 926 skb_checksum_help(skb); 927 return 0; 928 } 929 930 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); 931 932 /* switch to inner header */ 933 l2_hdr = skb_inner_mac_header(skb); 934 l3.hdr = skb_inner_network_header(skb); 935 l4.hdr = skb_inner_transport_header(skb); 936 l4_proto = il4_proto; 937 } 938 939 if (l3.v4->version == 4) { 940 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 941 HNS3_L3T_IPV4); 942 943 /* the stack computes the IP header already, the only time we 944 * need the hardware to recompute it is in the case of TSO. 
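		 * So the L3 checksum-enable bit HNS3_TXD_L3CS_B is only set
		 * for GSO skbs here.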
945 */ 946 if (skb_is_gso(skb)) 947 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); 948 } else if (l3.v6->version == 6) { 949 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 950 HNS3_L3T_IPV6); 951 } 952 953 /* compute inner(/normal) L2 header size, defined in 2 Bytes */ 954 l2_len = l3.hdr - l2_hdr; 955 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); 956 957 /* compute inner(/normal) L3 header size, defined in 4 Bytes */ 958 l3_len = l4.hdr - l3.hdr; 959 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); 960 961 /* compute inner(/normal) L4 header size, defined in 4 Bytes */ 962 switch (l4_proto) { 963 case IPPROTO_TCP: 964 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 965 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 966 HNS3_L4T_TCP); 967 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 968 l4.tcp->doff); 969 break; 970 case IPPROTO_UDP: 971 if (hns3_tunnel_csum_bug(skb)) 972 break; 973 974 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 975 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 976 HNS3_L4T_UDP); 977 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 978 (sizeof(struct udphdr) >> 2)); 979 break; 980 case IPPROTO_SCTP: 981 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 982 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 983 HNS3_L4T_SCTP); 984 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 985 (sizeof(struct sctphdr) >> 2)); 986 break; 987 default: 988 /* drop the skb tunnel packet if hardware don't support, 989 * because hardware can't calculate csum when TSO. 990 */ 991 if (skb_is_gso(skb)) 992 return -EDOM; 993 994 /* the stack computes the IP header already, 995 * driver calculate l4 checksum when not TSO. 996 */ 997 skb_checksum_help(skb); 998 return 0; 999 } 1000 1001 return 0; 1002 } 1003 1004 static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, 1005 struct sk_buff *skb) 1006 { 1007 struct hnae3_handle *handle = tx_ring->tqp->handle; 1008 struct hnae3_ae_dev *ae_dev; 1009 struct vlan_ethhdr *vhdr; 1010 int rc; 1011 1012 if (!(skb->protocol == htons(ETH_P_8021Q) || 1013 skb_vlan_tag_present(skb))) 1014 return 0; 1015 1016 /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert 1017 * VLAN enabled, only one VLAN header is allowed in skb, otherwise it 1018 * will cause RAS error. 1019 */ 1020 ae_dev = pci_get_drvdata(handle->pdev); 1021 if (unlikely(skb_vlan_tagged_multi(skb) && 1022 ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && 1023 handle->port_base_vlan_state == 1024 HNAE3_PORT_BASE_VLAN_ENABLE)) 1025 return -EINVAL; 1026 1027 if (skb->protocol == htons(ETH_P_8021Q) && 1028 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 1029 /* When HW VLAN acceleration is turned off, and the stack 1030 * sets the protocol to 802.1q, the driver just need to 1031 * set the protocol to the encapsulated ethertype. 1032 */ 1033 skb->protocol = vlan_get_protocol(skb); 1034 return 0; 1035 } 1036 1037 if (skb_vlan_tag_present(skb)) { 1038 /* Based on hw strategy, use out_vtag in two layer tag case, 1039 * and use inner_vtag in one tag case. 
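		 * The caller (hns3_fill_skb_desc()) maps HNS3_OUTER_VLAN_TAG
		 * to the outer_vlan_tag field of the TX BD and
		 * HNS3_INNER_VLAN_TAG to the vlan_tag field.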
1040 */ 1041 if (skb->protocol == htons(ETH_P_8021Q) && 1042 handle->port_base_vlan_state == 1043 HNAE3_PORT_BASE_VLAN_DISABLE) 1044 rc = HNS3_OUTER_VLAN_TAG; 1045 else 1046 rc = HNS3_INNER_VLAN_TAG; 1047 1048 skb->protocol = vlan_get_protocol(skb); 1049 return rc; 1050 } 1051 1052 rc = skb_cow_head(skb, 0); 1053 if (unlikely(rc < 0)) 1054 return rc; 1055 1056 vhdr = (struct vlan_ethhdr *)skb->data; 1057 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) 1058 & VLAN_PRIO_MASK); 1059 1060 skb->protocol = vlan_get_protocol(skb); 1061 return 0; 1062 } 1063 1064 /* check if the hardware is capable of checksum offloading */ 1065 static bool hns3_check_hw_tx_csum(struct sk_buff *skb) 1066 { 1067 struct hns3_nic_priv *priv = netdev_priv(skb->dev); 1068 1069 /* Kindly note, due to backward compatibility of the TX descriptor, 1070 * HW checksum of the non-IP packets and GSO packets is handled at 1071 * different place in the following code 1072 */ 1073 if (skb_csum_is_sctp(skb) || skb_is_gso(skb) || 1074 !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) 1075 return false; 1076 1077 return true; 1078 } 1079 1080 static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, 1081 struct sk_buff *skb, struct hns3_desc *desc) 1082 { 1083 u32 ol_type_vlan_len_msec = 0; 1084 u32 paylen_ol4cs = skb->len; 1085 u32 type_cs_vlan_tso = 0; 1086 u16 mss_hw_csum = 0; 1087 u16 inner_vtag = 0; 1088 u16 out_vtag = 0; 1089 int ret; 1090 1091 ret = hns3_handle_vtags(ring, skb); 1092 if (unlikely(ret < 0)) { 1093 u64_stats_update_begin(&ring->syncp); 1094 ring->stats.tx_vlan_err++; 1095 u64_stats_update_end(&ring->syncp); 1096 return ret; 1097 } else if (ret == HNS3_INNER_VLAN_TAG) { 1098 inner_vtag = skb_vlan_tag_get(skb); 1099 inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1100 VLAN_PRIO_MASK; 1101 hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); 1102 } else if (ret == HNS3_OUTER_VLAN_TAG) { 1103 out_vtag = skb_vlan_tag_get(skb); 1104 out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1105 VLAN_PRIO_MASK; 1106 hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, 1107 1); 1108 } 1109 1110 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1111 u8 ol4_proto, il4_proto; 1112 1113 if (hns3_check_hw_tx_csum(skb)) { 1114 /* set checksum start and offset, defined in 2 Bytes */ 1115 hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, 1116 skb_checksum_start_offset(skb) >> 1); 1117 hns3_set_field(ol_type_vlan_len_msec, 1118 HNS3_TXD_CSUM_OFFSET_S, 1119 skb->csum_offset >> 1); 1120 mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); 1121 goto out_hw_tx_csum; 1122 } 1123 1124 skb_reset_mac_len(skb); 1125 1126 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); 1127 if (unlikely(ret < 0)) { 1128 u64_stats_update_begin(&ring->syncp); 1129 ring->stats.tx_l4_proto_err++; 1130 u64_stats_update_end(&ring->syncp); 1131 return ret; 1132 } 1133 1134 ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, 1135 &type_cs_vlan_tso, 1136 &ol_type_vlan_len_msec); 1137 if (unlikely(ret < 0)) { 1138 u64_stats_update_begin(&ring->syncp); 1139 ring->stats.tx_l2l3l4_err++; 1140 u64_stats_update_end(&ring->syncp); 1141 return ret; 1142 } 1143 1144 ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum, 1145 &type_cs_vlan_tso); 1146 if (unlikely(ret < 0)) { 1147 u64_stats_update_begin(&ring->syncp); 1148 ring->stats.tx_tso_err++; 1149 u64_stats_update_end(&ring->syncp); 1150 return ret; 1151 } 1152 } 1153 1154 out_hw_tx_csum: 1155 /* Set txbd */ 1156 desc->tx.ol_type_vlan_len_msec = 1157 cpu_to_le32(ol_type_vlan_len_msec); 1158 
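	/* The remaining TX BD words carry the inner/outer VLAN tags, the
	 * payload length and the MSS / hardware checksum control computed
	 * above.
	 */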
	desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
	desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
	desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
	desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
	desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);

	return 0;
}

static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
			  unsigned int size, enum hns_desc_type type)
{
#define HNS3_LIKELY_BD_NUM	1

	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	struct device *dev = ring_to_dev(ring);
	skb_frag_t *frag;
	unsigned int frag_buf_num;
	int k, sizeoflast;
	dma_addr_t dma;

	if (type == DESC_TYPE_FRAGLIST_SKB ||
	    type == DESC_TYPE_SKB) {
		struct sk_buff *skb = (struct sk_buff *)priv;

		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	} else {
		frag = (skb_frag_t *)priv;
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
	}

	if (unlikely(dma_mapping_error(dev, dma))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		return -ENOMEM;
	}

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	if (likely(size <= HNS3_MAX_BD_SIZE)) {
		desc->addr = cpu_to_le64(dma);
		desc->tx.send_size = cpu_to_le16(size);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
			cpu_to_le16(BIT(HNS3_TXD_VLD_B));

		trace_hns3_tx_desc(ring, ring->next_to_use);
		ring_ptr_move_fw(ring, next_to_use);
		return HNS3_LIKELY_BD_NUM;
	}

	frag_buf_num = hns3_tx_bd_count(size);
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When frag size is bigger than hardware limit, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		/* now, fill the descriptor */
		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1222 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); 1223 desc->tx.bdtp_fe_sc_vld_ra_ri = 1224 cpu_to_le16(BIT(HNS3_TXD_VLD_B)); 1225 1226 trace_hns3_tx_desc(ring, ring->next_to_use); 1227 /* move ring pointer to next */ 1228 ring_ptr_move_fw(ring, next_to_use); 1229 1230 desc = &ring->desc[ring->next_to_use]; 1231 } 1232 1233 return frag_buf_num; 1234 } 1235 1236 static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, 1237 unsigned int bd_num) 1238 { 1239 unsigned int size; 1240 int i; 1241 1242 size = skb_headlen(skb); 1243 while (size > HNS3_MAX_BD_SIZE) { 1244 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1245 size -= HNS3_MAX_BD_SIZE; 1246 1247 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1248 return bd_num; 1249 } 1250 1251 if (size) { 1252 bd_size[bd_num++] = size; 1253 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1254 return bd_num; 1255 } 1256 1257 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1258 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1259 size = skb_frag_size(frag); 1260 if (!size) 1261 continue; 1262 1263 while (size > HNS3_MAX_BD_SIZE) { 1264 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1265 size -= HNS3_MAX_BD_SIZE; 1266 1267 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1268 return bd_num; 1269 } 1270 1271 bd_size[bd_num++] = size; 1272 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1273 return bd_num; 1274 } 1275 1276 return bd_num; 1277 } 1278 1279 static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size, 1280 u8 max_non_tso_bd_num) 1281 { 1282 struct sk_buff *frag_skb; 1283 unsigned int bd_num = 0; 1284 1285 /* If the total len is within the max bd limit */ 1286 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) && 1287 skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) 1288 return skb_shinfo(skb)->nr_frags + 1U; 1289 1290 /* The below case will always be linearized, return 1291 * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized. 1292 */ 1293 if (unlikely(skb->len > HNS3_MAX_TSO_SIZE || 1294 (!skb_is_gso(skb) && skb->len > 1295 HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num)))) 1296 return HNS3_MAX_TSO_BD_NUM + 1U; 1297 1298 bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); 1299 1300 if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) 1301 return bd_num; 1302 1303 skb_walk_frags(skb, frag_skb) { 1304 bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num); 1305 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1306 return bd_num; 1307 } 1308 1309 return bd_num; 1310 } 1311 1312 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) 1313 { 1314 if (!skb->encapsulation) 1315 return skb_transport_offset(skb) + tcp_hdrlen(skb); 1316 1317 return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); 1318 } 1319 1320 /* HW need every continuous max_non_tso_bd_num buffer data to be larger 1321 * than MSS, we simplify it by ensuring skb_headlen + the first continuous 1322 * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss, 1323 * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger 1324 * than MSS except the last max_non_tso_bd_num - 1 frags. 
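 * For example, with max_non_tso_bd_num == 8, an MSS of 1460 and a 54 byte
 * header, the first 8 buffers must together hold at least 1514 bytes and
 * every later window of 7 consecutive buffers at least 1460 bytes,
 * otherwise the skb is linearized by the caller.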
1325 */ 1326 static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, 1327 unsigned int bd_num, u8 max_non_tso_bd_num) 1328 { 1329 unsigned int tot_len = 0; 1330 int i; 1331 1332 for (i = 0; i < max_non_tso_bd_num - 1U; i++) 1333 tot_len += bd_size[i]; 1334 1335 /* ensure the first max_non_tso_bd_num frags is greater than 1336 * mss + header 1337 */ 1338 if (tot_len + bd_size[max_non_tso_bd_num - 1U] < 1339 skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) 1340 return true; 1341 1342 /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater 1343 * than mss except the last one. 1344 */ 1345 for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { 1346 tot_len -= bd_size[i]; 1347 tot_len += bd_size[i + max_non_tso_bd_num - 1U]; 1348 1349 if (tot_len < skb_shinfo(skb)->gso_size) 1350 return true; 1351 } 1352 1353 return false; 1354 } 1355 1356 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) 1357 { 1358 int i; 1359 1360 for (i = 0; i < MAX_SKB_FRAGS; i++) 1361 size[i] = skb_frag_size(&shinfo->frags[i]); 1362 } 1363 1364 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, 1365 struct net_device *netdev, 1366 struct sk_buff *skb) 1367 { 1368 struct hns3_nic_priv *priv = netdev_priv(netdev); 1369 u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; 1370 unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; 1371 unsigned int bd_num; 1372 1373 bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num); 1374 if (unlikely(bd_num > max_non_tso_bd_num)) { 1375 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && 1376 !hns3_skb_need_linearized(skb, bd_size, bd_num, 1377 max_non_tso_bd_num)) { 1378 trace_hns3_over_max_bd(skb); 1379 goto out; 1380 } 1381 1382 if (__skb_linearize(skb)) 1383 return -ENOMEM; 1384 1385 bd_num = hns3_tx_bd_count(skb->len); 1386 if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || 1387 (!skb_is_gso(skb) && 1388 bd_num > max_non_tso_bd_num)) { 1389 trace_hns3_over_max_bd(skb); 1390 return -ENOMEM; 1391 } 1392 1393 u64_stats_update_begin(&ring->syncp); 1394 ring->stats.tx_copy++; 1395 u64_stats_update_end(&ring->syncp); 1396 } 1397 1398 out: 1399 if (likely(ring_space(ring) >= bd_num)) 1400 return bd_num; 1401 1402 netif_stop_subqueue(netdev, ring->queue_index); 1403 smp_mb(); /* Memory barrier before checking ring_space */ 1404 1405 /* Start queue in case hns3_clean_tx_ring has just made room 1406 * available and has not seen the queue stopped state performed 1407 * by netif_stop_subqueue above. 
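	 * If the queue still cannot be restarted, -EBUSY is returned and the
	 * caller accounts it as tx_busy and reports NETDEV_TX_BUSY.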
1408 */ 1409 if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && 1410 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 1411 netif_start_subqueue(netdev, ring->queue_index); 1412 return bd_num; 1413 } 1414 1415 return -EBUSY; 1416 } 1417 1418 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) 1419 { 1420 struct device *dev = ring_to_dev(ring); 1421 unsigned int i; 1422 1423 for (i = 0; i < ring->desc_num; i++) { 1424 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1425 1426 memset(desc, 0, sizeof(*desc)); 1427 1428 /* check if this is where we started */ 1429 if (ring->next_to_use == next_to_use_orig) 1430 break; 1431 1432 /* rollback one */ 1433 ring_ptr_move_bw(ring, next_to_use); 1434 1435 if (!ring->desc_cb[ring->next_to_use].dma) 1436 continue; 1437 1438 /* unmap the descriptor dma address */ 1439 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB || 1440 ring->desc_cb[ring->next_to_use].type == 1441 DESC_TYPE_FRAGLIST_SKB) 1442 dma_unmap_single(dev, 1443 ring->desc_cb[ring->next_to_use].dma, 1444 ring->desc_cb[ring->next_to_use].length, 1445 DMA_TO_DEVICE); 1446 else if (ring->desc_cb[ring->next_to_use].length) 1447 dma_unmap_page(dev, 1448 ring->desc_cb[ring->next_to_use].dma, 1449 ring->desc_cb[ring->next_to_use].length, 1450 DMA_TO_DEVICE); 1451 1452 ring->desc_cb[ring->next_to_use].length = 0; 1453 ring->desc_cb[ring->next_to_use].dma = 0; 1454 ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; 1455 } 1456 } 1457 1458 static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, 1459 struct sk_buff *skb, enum hns_desc_type type) 1460 { 1461 unsigned int size = skb_headlen(skb); 1462 int i, ret, bd_num = 0; 1463 1464 if (size) { 1465 ret = hns3_fill_desc(ring, skb, size, type); 1466 if (unlikely(ret < 0)) 1467 return ret; 1468 1469 bd_num += ret; 1470 } 1471 1472 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1473 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1474 1475 size = skb_frag_size(frag); 1476 if (!size) 1477 continue; 1478 1479 ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE); 1480 if (unlikely(ret < 0)) 1481 return ret; 1482 1483 bd_num += ret; 1484 } 1485 1486 return bd_num; 1487 } 1488 1489 static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, 1490 bool doorbell) 1491 { 1492 ring->pending_buf += num; 1493 1494 if (!doorbell) { 1495 u64_stats_update_begin(&ring->syncp); 1496 ring->stats.tx_more++; 1497 u64_stats_update_end(&ring->syncp); 1498 return; 1499 } 1500 1501 if (!ring->pending_buf) 1502 return; 1503 1504 writel(ring->pending_buf, 1505 ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); 1506 ring->pending_buf = 0; 1507 WRITE_ONCE(ring->last_to_use, ring->next_to_use); 1508 } 1509 1510 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 1511 { 1512 struct hns3_nic_priv *priv = netdev_priv(netdev); 1513 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; 1514 struct netdev_queue *dev_queue; 1515 int pre_ntu, next_to_use_head; 1516 struct sk_buff *frag_skb; 1517 int bd_num = 0; 1518 bool doorbell; 1519 int ret; 1520 1521 /* Hardware can only handle short frames above 32 bytes */ 1522 if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { 1523 hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); 1524 return NETDEV_TX_OK; 1525 } 1526 1527 /* Prefetch the data used later */ 1528 prefetch(skb->data); 1529 1530 ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); 1531 if (unlikely(ret <= 0)) { 1532 if (ret == -EBUSY) { 1533 u64_stats_update_begin(&ring->syncp); 1534 
			ring->stats.tx_busy++;
			u64_stats_update_end(&ring->syncp);
			hns3_tx_doorbell(ring, 0, true);
			return NETDEV_TX_BUSY;
		} else if (ret == -ENOMEM) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.sw_err_cnt++;
			u64_stats_update_end(&ring->syncp);
		}

		hns3_rl_err(netdev, "xmit error: %d!\n", ret);
		goto out_err_tx_ok;
	}

	next_to_use_head = ring->next_to_use;

	ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
	if (unlikely(ret < 0))
		goto fill_err;

	ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
	if (unlikely(ret < 0))
		goto fill_err;

	bd_num += ret;

	skb_walk_frags(skb, frag_skb) {
		ret = hns3_fill_skb_to_desc(ring, frag_skb,
					    DESC_TYPE_FRAGLIST_SKB);
		if (unlikely(ret < 0))
			goto fill_err;

		bd_num += ret;
	}

	pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
					(ring->desc_num - 1);
	ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
				cpu_to_le16(BIT(HNS3_TXD_FE_B));
	trace_hns3_tx_desc(ring, pre_ntu);

	/* complete translating all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
	doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
					  netdev_xmit_more());
	hns3_tx_doorbell(ring, bd_num, doorbell);

	return NETDEV_TX_OK;

fill_err:
	hns3_clear_desc(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
	return NETDEV_TX_OK;
}

static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    mac_addr->sa_data);
		return 0;
	}

	/* For VF device, if there is a perm_addr, then the user will not
	 * be allowed to change the address.
1609 */ 1610 if (!hns3_is_phys_func(h->pdev) && 1611 !is_zero_ether_addr(netdev->perm_addr)) { 1612 netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n", 1613 netdev->perm_addr, mac_addr->sa_data); 1614 return -EPERM; 1615 } 1616 1617 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); 1618 if (ret) { 1619 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); 1620 return ret; 1621 } 1622 1623 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); 1624 1625 return 0; 1626 } 1627 1628 static int hns3_nic_do_ioctl(struct net_device *netdev, 1629 struct ifreq *ifr, int cmd) 1630 { 1631 struct hnae3_handle *h = hns3_get_handle(netdev); 1632 1633 if (!netif_running(netdev)) 1634 return -EINVAL; 1635 1636 if (!h->ae_algo->ops->do_ioctl) 1637 return -EOPNOTSUPP; 1638 1639 return h->ae_algo->ops->do_ioctl(h, ifr, cmd); 1640 } 1641 1642 static int hns3_nic_set_features(struct net_device *netdev, 1643 netdev_features_t features) 1644 { 1645 netdev_features_t changed = netdev->features ^ features; 1646 struct hns3_nic_priv *priv = netdev_priv(netdev); 1647 struct hnae3_handle *h = priv->ae_handle; 1648 bool enable; 1649 int ret; 1650 1651 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { 1652 enable = !!(features & NETIF_F_GRO_HW); 1653 ret = h->ae_algo->ops->set_gro_en(h, enable); 1654 if (ret) 1655 return ret; 1656 } 1657 1658 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && 1659 h->ae_algo->ops->enable_hw_strip_rxvtag) { 1660 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); 1661 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); 1662 if (ret) 1663 return ret; 1664 } 1665 1666 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { 1667 enable = !!(features & NETIF_F_NTUPLE); 1668 h->ae_algo->ops->enable_fd(h, enable); 1669 } 1670 1671 if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && 1672 h->ae_algo->ops->cls_flower_active(h)) { 1673 netdev_err(netdev, 1674 "there are offloaded TC filters active, cannot disable HW TC offload"); 1675 return -EINVAL; 1676 } 1677 1678 netdev->features = features; 1679 return 0; 1680 } 1681 1682 static netdev_features_t hns3_features_check(struct sk_buff *skb, 1683 struct net_device *dev, 1684 netdev_features_t features) 1685 { 1686 #define HNS3_MAX_HDR_LEN 480U 1687 #define HNS3_MAX_L4_HDR_LEN 60U 1688 1689 size_t len; 1690 1691 if (skb->ip_summed != CHECKSUM_PARTIAL) 1692 return features; 1693 1694 if (skb->encapsulation) 1695 len = skb_inner_transport_header(skb) - skb->data; 1696 else 1697 len = skb_transport_header(skb) - skb->data; 1698 1699 /* Assume L4 is 60 byte as TCP is the only protocol with a 1700 * a flexible value, and it's max len is 60 bytes. 1701 */ 1702 len += HNS3_MAX_L4_HDR_LEN; 1703 1704 /* Hardware only supports checksum on the skb with a max header 1705 * len of 480 bytes. 
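	 * Longer headers fall back to software by clearing the checksum and
	 * GSO feature bits for this skb.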
1706 */ 1707 if (len > HNS3_MAX_HDR_LEN) 1708 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 1709 1710 return features; 1711 } 1712 1713 static void hns3_nic_get_stats64(struct net_device *netdev, 1714 struct rtnl_link_stats64 *stats) 1715 { 1716 struct hns3_nic_priv *priv = netdev_priv(netdev); 1717 int queue_num = priv->ae_handle->kinfo.num_tqps; 1718 struct hnae3_handle *handle = priv->ae_handle; 1719 struct hns3_enet_ring *ring; 1720 u64 rx_length_errors = 0; 1721 u64 rx_crc_errors = 0; 1722 u64 rx_multicast = 0; 1723 unsigned int start; 1724 u64 tx_errors = 0; 1725 u64 rx_errors = 0; 1726 unsigned int idx; 1727 u64 tx_bytes = 0; 1728 u64 rx_bytes = 0; 1729 u64 tx_pkts = 0; 1730 u64 rx_pkts = 0; 1731 u64 tx_drop = 0; 1732 u64 rx_drop = 0; 1733 1734 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 1735 return; 1736 1737 handle->ae_algo->ops->update_stats(handle, &netdev->stats); 1738 1739 for (idx = 0; idx < queue_num; idx++) { 1740 /* fetch the tx stats */ 1741 ring = &priv->ring[idx]; 1742 do { 1743 start = u64_stats_fetch_begin_irq(&ring->syncp); 1744 tx_bytes += ring->stats.tx_bytes; 1745 tx_pkts += ring->stats.tx_pkts; 1746 tx_drop += ring->stats.sw_err_cnt; 1747 tx_drop += ring->stats.tx_vlan_err; 1748 tx_drop += ring->stats.tx_l4_proto_err; 1749 tx_drop += ring->stats.tx_l2l3l4_err; 1750 tx_drop += ring->stats.tx_tso_err; 1751 tx_errors += ring->stats.sw_err_cnt; 1752 tx_errors += ring->stats.tx_vlan_err; 1753 tx_errors += ring->stats.tx_l4_proto_err; 1754 tx_errors += ring->stats.tx_l2l3l4_err; 1755 tx_errors += ring->stats.tx_tso_err; 1756 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1757 1758 /* fetch the rx stats */ 1759 ring = &priv->ring[idx + queue_num]; 1760 do { 1761 start = u64_stats_fetch_begin_irq(&ring->syncp); 1762 rx_bytes += ring->stats.rx_bytes; 1763 rx_pkts += ring->stats.rx_pkts; 1764 rx_drop += ring->stats.l2_err; 1765 rx_errors += ring->stats.l2_err; 1766 rx_errors += ring->stats.l3l4_csum_err; 1767 rx_crc_errors += ring->stats.l2_err; 1768 rx_multicast += ring->stats.rx_multicast; 1769 rx_length_errors += ring->stats.err_pkt_len; 1770 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1771 } 1772 1773 stats->tx_bytes = tx_bytes; 1774 stats->tx_packets = tx_pkts; 1775 stats->rx_bytes = rx_bytes; 1776 stats->rx_packets = rx_pkts; 1777 1778 stats->rx_errors = rx_errors; 1779 stats->multicast = rx_multicast; 1780 stats->rx_length_errors = rx_length_errors; 1781 stats->rx_crc_errors = rx_crc_errors; 1782 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 1783 1784 stats->tx_errors = tx_errors; 1785 stats->rx_dropped = rx_drop; 1786 stats->tx_dropped = tx_drop; 1787 stats->collisions = netdev->stats.collisions; 1788 stats->rx_over_errors = netdev->stats.rx_over_errors; 1789 stats->rx_frame_errors = netdev->stats.rx_frame_errors; 1790 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; 1791 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; 1792 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; 1793 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; 1794 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; 1795 stats->tx_window_errors = netdev->stats.tx_window_errors; 1796 stats->rx_compressed = netdev->stats.rx_compressed; 1797 stats->tx_compressed = netdev->stats.tx_compressed; 1798 } 1799 1800 static int hns3_setup_tc(struct net_device *netdev, void *type_data) 1801 { 1802 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 1803 struct hnae3_knic_private_info *kinfo; 1804 u8 tc = 
mqprio_qopt->qopt.num_tc; 1805 u16 mode = mqprio_qopt->mode; 1806 u8 hw = mqprio_qopt->qopt.hw; 1807 struct hnae3_handle *h; 1808 1809 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && 1810 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) 1811 return -EOPNOTSUPP; 1812 1813 if (tc > HNAE3_MAX_TC) 1814 return -EINVAL; 1815 1816 if (!netdev) 1817 return -EINVAL; 1818 1819 h = hns3_get_handle(netdev); 1820 kinfo = &h->kinfo; 1821 1822 netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); 1823 1824 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 1825 kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; 1826 } 1827 1828 static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv, 1829 struct flow_cls_offload *flow) 1830 { 1831 int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); 1832 struct hnae3_handle *h = hns3_get_handle(priv->netdev); 1833 1834 switch (flow->command) { 1835 case FLOW_CLS_REPLACE: 1836 if (h->ae_algo->ops->add_cls_flower) 1837 return h->ae_algo->ops->add_cls_flower(h, flow, tc); 1838 break; 1839 case FLOW_CLS_DESTROY: 1840 if (h->ae_algo->ops->del_cls_flower) 1841 return h->ae_algo->ops->del_cls_flower(h, flow); 1842 break; 1843 default: 1844 break; 1845 } 1846 1847 return -EOPNOTSUPP; 1848 } 1849 1850 static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 1851 void *cb_priv) 1852 { 1853 struct hns3_nic_priv *priv = cb_priv; 1854 1855 if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) 1856 return -EOPNOTSUPP; 1857 1858 switch (type) { 1859 case TC_SETUP_CLSFLOWER: 1860 return hns3_setup_tc_cls_flower(priv, type_data); 1861 default: 1862 return -EOPNOTSUPP; 1863 } 1864 } 1865 1866 static LIST_HEAD(hns3_block_cb_list); 1867 1868 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 1869 void *type_data) 1870 { 1871 struct hns3_nic_priv *priv = netdev_priv(dev); 1872 int ret; 1873 1874 switch (type) { 1875 case TC_SETUP_QDISC_MQPRIO: 1876 ret = hns3_setup_tc(dev, type_data); 1877 break; 1878 case TC_SETUP_BLOCK: 1879 ret = flow_block_cb_setup_simple(type_data, 1880 &hns3_block_cb_list, 1881 hns3_setup_tc_block_cb, 1882 priv, priv, true); 1883 break; 1884 default: 1885 return -EOPNOTSUPP; 1886 } 1887 1888 return ret; 1889 } 1890 1891 static int hns3_vlan_rx_add_vid(struct net_device *netdev, 1892 __be16 proto, u16 vid) 1893 { 1894 struct hnae3_handle *h = hns3_get_handle(netdev); 1895 int ret = -EIO; 1896 1897 if (h->ae_algo->ops->set_vlan_filter) 1898 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); 1899 1900 return ret; 1901 } 1902 1903 static int hns3_vlan_rx_kill_vid(struct net_device *netdev, 1904 __be16 proto, u16 vid) 1905 { 1906 struct hnae3_handle *h = hns3_get_handle(netdev); 1907 int ret = -EIO; 1908 1909 if (h->ae_algo->ops->set_vlan_filter) 1910 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); 1911 1912 return ret; 1913 } 1914 1915 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 1916 u8 qos, __be16 vlan_proto) 1917 { 1918 struct hnae3_handle *h = hns3_get_handle(netdev); 1919 int ret = -EIO; 1920 1921 netif_dbg(h, drv, netdev, 1922 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n", 1923 vf, vlan, qos, ntohs(vlan_proto)); 1924 1925 if (h->ae_algo->ops->set_vf_vlan_filter) 1926 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, 1927 qos, vlan_proto); 1928 1929 return ret; 1930 } 1931 1932 static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) 1933 { 1934 struct hnae3_handle *handle = hns3_get_handle(netdev); 
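	/* The request is rejected with -EBUSY while the device is resetting,
	 * and with -EOPNOTSUPP when the ae_algo does not implement
	 * set_vf_spoofchk.
	 */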
1935 1936 if (hns3_nic_resetting(netdev)) 1937 return -EBUSY; 1938 1939 if (!handle->ae_algo->ops->set_vf_spoofchk) 1940 return -EOPNOTSUPP; 1941 1942 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); 1943 } 1944 1945 static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) 1946 { 1947 struct hnae3_handle *handle = hns3_get_handle(netdev); 1948 1949 if (!handle->ae_algo->ops->set_vf_trust) 1950 return -EOPNOTSUPP; 1951 1952 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); 1953 } 1954 1955 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) 1956 { 1957 struct hnae3_handle *h = hns3_get_handle(netdev); 1958 int ret; 1959 1960 if (hns3_nic_resetting(netdev)) 1961 return -EBUSY; 1962 1963 if (!h->ae_algo->ops->set_mtu) 1964 return -EOPNOTSUPP; 1965 1966 netif_dbg(h, drv, netdev, 1967 "change mtu from %u to %d\n", netdev->mtu, new_mtu); 1968 1969 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 1970 if (ret) 1971 netdev_err(netdev, "failed to change MTU in hardware %d\n", 1972 ret); 1973 else 1974 netdev->mtu = new_mtu; 1975 1976 return ret; 1977 } 1978 1979 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) 1980 { 1981 struct hns3_nic_priv *priv = netdev_priv(ndev); 1982 struct hnae3_handle *h = hns3_get_handle(ndev); 1983 struct hns3_enet_ring *tx_ring; 1984 struct napi_struct *napi; 1985 int timeout_queue = 0; 1986 int hw_head, hw_tail; 1987 int fbd_num, fbd_oft; 1988 int ebd_num, ebd_oft; 1989 int bd_num, bd_err; 1990 int ring_en, tc; 1991 int i; 1992 1993 /* Find the stopped queue the same way the stack does */ 1994 for (i = 0; i < ndev->num_tx_queues; i++) { 1995 struct netdev_queue *q; 1996 unsigned long trans_start; 1997 1998 q = netdev_get_tx_queue(ndev, i); 1999 trans_start = q->trans_start; 2000 if (netif_xmit_stopped(q) && 2001 time_after(jiffies, 2002 (trans_start + ndev->watchdog_timeo))) { 2003 timeout_queue = i; 2004 netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", 2005 q->state, 2006 jiffies_to_msecs(jiffies - trans_start)); 2007 break; 2008 } 2009 } 2010 2011 if (i == ndev->num_tx_queues) { 2012 netdev_info(ndev, 2013 "no netdev TX timeout queue found, timeout count: %llu\n", 2014 priv->tx_timeout_count); 2015 return false; 2016 } 2017 2018 priv->tx_timeout_count++; 2019 2020 tx_ring = &priv->ring[timeout_queue]; 2021 napi = &tx_ring->tqp_vector->napi; 2022 2023 netdev_info(ndev, 2024 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", 2025 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, 2026 tx_ring->next_to_clean, napi->state); 2027 2028 netdev_info(ndev, 2029 "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n", 2030 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, 2031 tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); 2032 2033 netdev_info(ndev, 2034 "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", 2035 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, 2036 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); 2037 2038 /* When mac received many pause frames continuous, it's unable to send 2039 * packets, which may cause tx timeout 2040 */ 2041 if (h->ae_algo->ops->get_mac_stats) { 2042 struct hns3_mac_stats mac_stats; 2043 2044 h->ae_algo->ops->get_mac_stats(h, &mac_stats); 2045 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", 2046 mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); 2047 } 2048 2049 hw_head = readl_relaxed(tx_ring->tqp->io_base + 2050 HNS3_RING_TX_RING_HEAD_REG); 2051 
hw_tail = readl_relaxed(tx_ring->tqp->io_base + 2052 HNS3_RING_TX_RING_TAIL_REG); 2053 fbd_num = readl_relaxed(tx_ring->tqp->io_base + 2054 HNS3_RING_TX_RING_FBDNUM_REG); 2055 fbd_oft = readl_relaxed(tx_ring->tqp->io_base + 2056 HNS3_RING_TX_RING_OFFSET_REG); 2057 ebd_num = readl_relaxed(tx_ring->tqp->io_base + 2058 HNS3_RING_TX_RING_EBDNUM_REG); 2059 ebd_oft = readl_relaxed(tx_ring->tqp->io_base + 2060 HNS3_RING_TX_RING_EBD_OFFSET_REG); 2061 bd_num = readl_relaxed(tx_ring->tqp->io_base + 2062 HNS3_RING_TX_RING_BD_NUM_REG); 2063 bd_err = readl_relaxed(tx_ring->tqp->io_base + 2064 HNS3_RING_TX_RING_BD_ERR_REG); 2065 ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); 2066 tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); 2067 2068 netdev_info(ndev, 2069 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", 2070 bd_num, hw_head, hw_tail, bd_err, 2071 readl(tx_ring->tqp_vector->mask_addr)); 2072 netdev_info(ndev, 2073 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", 2074 ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); 2075 2076 return true; 2077 } 2078 2079 static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue) 2080 { 2081 struct hns3_nic_priv *priv = netdev_priv(ndev); 2082 struct hnae3_handle *h = priv->ae_handle; 2083 2084 if (!hns3_get_tx_timeo_queue_info(ndev)) 2085 return; 2086 2087 /* request the reset, and let the hclge to determine 2088 * which reset level should be done 2089 */ 2090 if (h->ae_algo->ops->reset_event) 2091 h->ae_algo->ops->reset_event(h->pdev, h); 2092 } 2093 2094 #ifdef CONFIG_RFS_ACCEL 2095 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 2096 u16 rxq_index, u32 flow_id) 2097 { 2098 struct hnae3_handle *h = hns3_get_handle(dev); 2099 struct flow_keys fkeys; 2100 2101 if (!h->ae_algo->ops->add_arfs_entry) 2102 return -EOPNOTSUPP; 2103 2104 if (skb->encapsulation) 2105 return -EPROTONOSUPPORT; 2106 2107 if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) 2108 return -EPROTONOSUPPORT; 2109 2110 if ((fkeys.basic.n_proto != htons(ETH_P_IP) && 2111 fkeys.basic.n_proto != htons(ETH_P_IPV6)) || 2112 (fkeys.basic.ip_proto != IPPROTO_TCP && 2113 fkeys.basic.ip_proto != IPPROTO_UDP)) 2114 return -EPROTONOSUPPORT; 2115 2116 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); 2117 } 2118 #endif 2119 2120 static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, 2121 struct ifla_vf_info *ivf) 2122 { 2123 struct hnae3_handle *h = hns3_get_handle(ndev); 2124 2125 if (!h->ae_algo->ops->get_vf_config) 2126 return -EOPNOTSUPP; 2127 2128 return h->ae_algo->ops->get_vf_config(h, vf, ivf); 2129 } 2130 2131 static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, 2132 int link_state) 2133 { 2134 struct hnae3_handle *h = hns3_get_handle(ndev); 2135 2136 if (!h->ae_algo->ops->set_vf_link_state) 2137 return -EOPNOTSUPP; 2138 2139 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); 2140 } 2141 2142 static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, 2143 int min_tx_rate, int max_tx_rate) 2144 { 2145 struct hnae3_handle *h = hns3_get_handle(ndev); 2146 2147 if (!h->ae_algo->ops->set_vf_rate) 2148 return -EOPNOTSUPP; 2149 2150 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, 2151 false); 2152 } 2153 2154 static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 2155 { 2156 struct hnae3_handle *h = hns3_get_handle(netdev); 2157 2158 if 
(!h->ae_algo->ops->set_vf_mac) 2159 return -EOPNOTSUPP; 2160 2161 if (is_multicast_ether_addr(mac)) { 2162 netdev_err(netdev, 2163 "Invalid MAC:%pM specified. Could not set MAC\n", 2164 mac); 2165 return -EINVAL; 2166 } 2167 2168 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); 2169 } 2170 2171 static const struct net_device_ops hns3_nic_netdev_ops = { 2172 .ndo_open = hns3_nic_net_open, 2173 .ndo_stop = hns3_nic_net_stop, 2174 .ndo_start_xmit = hns3_nic_net_xmit, 2175 .ndo_tx_timeout = hns3_nic_net_timeout, 2176 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 2177 .ndo_do_ioctl = hns3_nic_do_ioctl, 2178 .ndo_change_mtu = hns3_nic_change_mtu, 2179 .ndo_set_features = hns3_nic_set_features, 2180 .ndo_features_check = hns3_features_check, 2181 .ndo_get_stats64 = hns3_nic_get_stats64, 2182 .ndo_setup_tc = hns3_nic_setup_tc, 2183 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 2184 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 2185 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 2186 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 2187 .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, 2188 .ndo_set_vf_trust = hns3_set_vf_trust, 2189 #ifdef CONFIG_RFS_ACCEL 2190 .ndo_rx_flow_steer = hns3_rx_flow_steer, 2191 #endif 2192 .ndo_get_vf_config = hns3_nic_get_vf_config, 2193 .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, 2194 .ndo_set_vf_rate = hns3_nic_set_vf_rate, 2195 .ndo_set_vf_mac = hns3_nic_set_vf_mac, 2196 }; 2197 2198 bool hns3_is_phys_func(struct pci_dev *pdev) 2199 { 2200 u32 dev_id = pdev->device; 2201 2202 switch (dev_id) { 2203 case HNAE3_DEV_ID_GE: 2204 case HNAE3_DEV_ID_25GE: 2205 case HNAE3_DEV_ID_25GE_RDMA: 2206 case HNAE3_DEV_ID_25GE_RDMA_MACSEC: 2207 case HNAE3_DEV_ID_50GE_RDMA: 2208 case HNAE3_DEV_ID_50GE_RDMA_MACSEC: 2209 case HNAE3_DEV_ID_100G_RDMA_MACSEC: 2210 case HNAE3_DEV_ID_200G_RDMA: 2211 return true; 2212 case HNAE3_DEV_ID_VF: 2213 case HNAE3_DEV_ID_RDMA_DCB_PFC_VF: 2214 return false; 2215 default: 2216 dev_warn(&pdev->dev, "un-recognized pci device-id %u", 2217 dev_id); 2218 } 2219 2220 return false; 2221 } 2222 2223 static void hns3_disable_sriov(struct pci_dev *pdev) 2224 { 2225 /* If our VFs are assigned we cannot shut down SR-IOV 2226 * without causing issues, so just leave the hardware 2227 * available but disabled 2228 */ 2229 if (pci_vfs_assigned(pdev)) { 2230 dev_warn(&pdev->dev, 2231 "disabling driver while VFs are assigned\n"); 2232 return; 2233 } 2234 2235 pci_disable_sriov(pdev); 2236 } 2237 2238 /* hns3_probe - Device initialization routine 2239 * @pdev: PCI device information struct 2240 * @ent: entry in hns3_pci_tbl 2241 * 2242 * hns3_probe initializes a PF identified by a pci_dev structure. 2243 * The OS initialization, configuring of the PF private structure, 2244 * and a hardware reset occur. 
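 * Most of the hardware-specific initialization is delegated to the hnae3
 * AE layer via hnae3_register_ae_dev().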
2245 * 2246 * Returns 0 on success, negative on failure 2247 */ 2248 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2249 { 2250 struct hnae3_ae_dev *ae_dev; 2251 int ret; 2252 2253 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); 2254 if (!ae_dev) 2255 return -ENOMEM; 2256 2257 ae_dev->pdev = pdev; 2258 ae_dev->flag = ent->driver_data; 2259 pci_set_drvdata(pdev, ae_dev); 2260 2261 ret = hnae3_register_ae_dev(ae_dev); 2262 if (ret) 2263 pci_set_drvdata(pdev, NULL); 2264 2265 return ret; 2266 } 2267 2268 /* hns3_remove - Device removal routine 2269 * @pdev: PCI device information struct 2270 */ 2271 static void hns3_remove(struct pci_dev *pdev) 2272 { 2273 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2274 2275 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) 2276 hns3_disable_sriov(pdev); 2277 2278 hnae3_unregister_ae_dev(ae_dev); 2279 pci_set_drvdata(pdev, NULL); 2280 } 2281 2282 /** 2283 * hns3_pci_sriov_configure 2284 * @pdev: pointer to a pci_dev structure 2285 * @num_vfs: number of VFs to allocate 2286 * 2287 * Enable or change the number of VFs. Called when the user updates the number 2288 * of VFs in sysfs. 2289 **/ 2290 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 2291 { 2292 int ret; 2293 2294 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { 2295 dev_warn(&pdev->dev, "Can not config SRIOV\n"); 2296 return -EINVAL; 2297 } 2298 2299 if (num_vfs) { 2300 ret = pci_enable_sriov(pdev, num_vfs); 2301 if (ret) 2302 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); 2303 else 2304 return num_vfs; 2305 } else if (!pci_vfs_assigned(pdev)) { 2306 pci_disable_sriov(pdev); 2307 } else { 2308 dev_warn(&pdev->dev, 2309 "Unable to free VFs because some are assigned to VMs.\n"); 2310 } 2311 2312 return 0; 2313 } 2314 2315 static void hns3_shutdown(struct pci_dev *pdev) 2316 { 2317 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2318 2319 hnae3_unregister_ae_dev(ae_dev); 2320 pci_set_drvdata(pdev, NULL); 2321 2322 if (system_state == SYSTEM_POWER_OFF) 2323 pci_set_power_state(pdev, PCI_D3hot); 2324 } 2325 2326 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, 2327 pci_channel_state_t state) 2328 { 2329 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2330 pci_ers_result_t ret; 2331 2332 dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state); 2333 2334 if (state == pci_channel_io_perm_failure) 2335 return PCI_ERS_RESULT_DISCONNECT; 2336 2337 if (!ae_dev || !ae_dev->ops) { 2338 dev_err(&pdev->dev, 2339 "Can't recover - error happened before device initialized\n"); 2340 return PCI_ERS_RESULT_NONE; 2341 } 2342 2343 if (ae_dev->ops->handle_hw_ras_error) 2344 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); 2345 else 2346 return PCI_ERS_RESULT_NONE; 2347 2348 return ret; 2349 } 2350 2351 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) 2352 { 2353 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2354 const struct hnae3_ae_ops *ops; 2355 enum hnae3_reset_type reset_type; 2356 struct device *dev = &pdev->dev; 2357 2358 if (!ae_dev || !ae_dev->ops) 2359 return PCI_ERS_RESULT_NONE; 2360 2361 ops = ae_dev->ops; 2362 /* request the reset */ 2363 if (ops->reset_event && ops->get_reset_level && 2364 ops->set_default_reset_request) { 2365 if (ae_dev->hw_err_reset_req) { 2366 reset_type = ops->get_reset_level(ae_dev, 2367 &ae_dev->hw_err_reset_req); 2368 ops->set_default_reset_request(ae_dev, reset_type); 2369 dev_info(dev, "requesting reset due to PCI 
error\n"); 2370 ops->reset_event(pdev, NULL); 2371 } 2372 2373 return PCI_ERS_RESULT_RECOVERED; 2374 } 2375 2376 return PCI_ERS_RESULT_DISCONNECT; 2377 } 2378 2379 static void hns3_reset_prepare(struct pci_dev *pdev) 2380 { 2381 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2382 2383 dev_info(&pdev->dev, "FLR prepare\n"); 2384 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) 2385 ae_dev->ops->flr_prepare(ae_dev); 2386 } 2387 2388 static void hns3_reset_done(struct pci_dev *pdev) 2389 { 2390 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2391 2392 dev_info(&pdev->dev, "FLR done\n"); 2393 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) 2394 ae_dev->ops->flr_done(ae_dev); 2395 } 2396 2397 static const struct pci_error_handlers hns3_err_handler = { 2398 .error_detected = hns3_error_detected, 2399 .slot_reset = hns3_slot_reset, 2400 .reset_prepare = hns3_reset_prepare, 2401 .reset_done = hns3_reset_done, 2402 }; 2403 2404 static struct pci_driver hns3_driver = { 2405 .name = hns3_driver_name, 2406 .id_table = hns3_pci_tbl, 2407 .probe = hns3_probe, 2408 .remove = hns3_remove, 2409 .shutdown = hns3_shutdown, 2410 .sriov_configure = hns3_pci_sriov_configure, 2411 .err_handler = &hns3_err_handler, 2412 }; 2413 2414 /* set default feature to hns3 */ 2415 static void hns3_set_default_feature(struct net_device *netdev) 2416 { 2417 struct hnae3_handle *h = hns3_get_handle(netdev); 2418 struct pci_dev *pdev = h->pdev; 2419 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2420 2421 netdev->priv_flags |= IFF_UNICAST_FLT; 2422 2423 netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2424 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2425 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2426 NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; 2427 2428 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 2429 2430 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 2431 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2432 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2433 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2434 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2435 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; 2436 2437 netdev->vlan_features |= NETIF_F_RXCSUM | 2438 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | 2439 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2440 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2441 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; 2442 2443 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 2444 NETIF_F_HW_VLAN_CTAG_RX | 2445 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2446 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2447 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2448 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; 2449 2450 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2451 netdev->hw_features |= NETIF_F_GRO_HW; 2452 netdev->features |= NETIF_F_GRO_HW; 2453 2454 if (!(h->flags & HNAE3_SUPPORT_VF)) { 2455 netdev->hw_features |= NETIF_F_NTUPLE; 2456 netdev->features |= NETIF_F_NTUPLE; 2457 } 2458 } 2459 2460 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) { 2461 netdev->hw_features |= NETIF_F_GSO_UDP_L4; 2462 netdev->features |= NETIF_F_GSO_UDP_L4; 2463 netdev->vlan_features |= NETIF_F_GSO_UDP_L4; 2464 netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4; 2465 } 2466 2467 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) { 2468 netdev->hw_features |= NETIF_F_HW_CSUM; 2469 netdev->features |= NETIF_F_HW_CSUM; 2470 netdev->vlan_features |= NETIF_F_HW_CSUM; 2471 
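/* encapsulated packets get the hardware checksum offload as well */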
netdev->hw_enc_features |= NETIF_F_HW_CSUM; 2472 } else { 2473 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2474 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2475 netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2476 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2477 } 2478 2479 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) { 2480 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2481 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2482 netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2483 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2484 } 2485 2486 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { 2487 netdev->hw_features |= NETIF_F_HW_TC; 2488 netdev->features |= NETIF_F_HW_TC; 2489 } 2490 } 2491 2492 static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 2493 struct hns3_desc_cb *cb) 2494 { 2495 unsigned int order = hns3_page_order(ring); 2496 struct page *p; 2497 2498 p = dev_alloc_pages(order); 2499 if (!p) 2500 return -ENOMEM; 2501 2502 cb->priv = p; 2503 cb->page_offset = 0; 2504 cb->reuse_flag = 0; 2505 cb->buf = page_address(p); 2506 cb->length = hns3_page_size(ring); 2507 cb->type = DESC_TYPE_PAGE; 2508 page_ref_add(p, USHRT_MAX - 1); 2509 cb->pagecnt_bias = USHRT_MAX; 2510 2511 return 0; 2512 } 2513 2514 static void hns3_free_buffer(struct hns3_enet_ring *ring, 2515 struct hns3_desc_cb *cb, int budget) 2516 { 2517 if (cb->type == DESC_TYPE_SKB) 2518 napi_consume_skb(cb->priv, budget); 2519 else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias) 2520 __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); 2521 memset(cb, 0, sizeof(*cb)); 2522 } 2523 2524 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) 2525 { 2526 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 2527 cb->length, ring_to_dma_dir(ring)); 2528 2529 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) 2530 return -EIO; 2531 2532 return 0; 2533 } 2534 2535 static void hns3_unmap_buffer(struct hns3_enet_ring *ring, 2536 struct hns3_desc_cb *cb) 2537 { 2538 if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB) 2539 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 2540 ring_to_dma_dir(ring)); 2541 else if (cb->length) 2542 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 2543 ring_to_dma_dir(ring)); 2544 } 2545 2546 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) 2547 { 2548 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2549 ring->desc[i].addr = 0; 2550 } 2551 2552 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, 2553 int budget) 2554 { 2555 struct hns3_desc_cb *cb = &ring->desc_cb[i]; 2556 2557 if (!ring->desc_cb[i].dma) 2558 return; 2559 2560 hns3_buffer_detach(ring, i); 2561 hns3_free_buffer(ring, cb, budget); 2562 } 2563 2564 static void hns3_free_buffers(struct hns3_enet_ring *ring) 2565 { 2566 int i; 2567 2568 for (i = 0; i < ring->desc_num; i++) 2569 hns3_free_buffer_detach(ring, i, 0); 2570 } 2571 2572 /* free desc along with its attached buffer */ 2573 static void hns3_free_desc(struct hns3_enet_ring *ring) 2574 { 2575 int size = ring->desc_num * sizeof(ring->desc[0]); 2576 2577 hns3_free_buffers(ring); 2578 2579 if (ring->desc) { 2580 dma_free_coherent(ring_to_dev(ring), size, 2581 ring->desc, ring->desc_dma_addr); 2582 ring->desc = NULL; 2583 } 2584 } 2585 2586 static int hns3_alloc_desc(struct hns3_enet_ring *ring) 2587 { 2588 int size = ring->desc_num * sizeof(ring->desc[0]); 2589 2590 
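/* the descriptor ring is shared with hardware, so allocate it from
 * DMA-coherent memory
 */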
ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, 2591 &ring->desc_dma_addr, GFP_KERNEL); 2592 if (!ring->desc) 2593 return -ENOMEM; 2594 2595 return 0; 2596 } 2597 2598 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, 2599 struct hns3_desc_cb *cb) 2600 { 2601 int ret; 2602 2603 ret = hns3_alloc_buffer(ring, cb); 2604 if (ret) 2605 goto out; 2606 2607 ret = hns3_map_buffer(ring, cb); 2608 if (ret) 2609 goto out_with_buf; 2610 2611 return 0; 2612 2613 out_with_buf: 2614 hns3_free_buffer(ring, cb, 0); 2615 out: 2616 return ret; 2617 } 2618 2619 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) 2620 { 2621 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); 2622 2623 if (ret) 2624 return ret; 2625 2626 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2627 2628 return 0; 2629 } 2630 2631 /* Allocate memory for the raw packet buffers and map them for DMA */ 2632 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) 2633 { 2634 int i, j, ret; 2635 2636 for (i = 0; i < ring->desc_num; i++) { 2637 ret = hns3_alloc_and_attach_buffer(ring, i); 2638 if (ret) 2639 goto out_buffer_fail; 2640 } 2641 2642 return 0; 2643 2644 out_buffer_fail: 2645 for (j = i - 1; j >= 0; j--) 2646 hns3_free_buffer_detach(ring, j, 0); 2647 return ret; 2648 } 2649 2650 /* detach an in-use buffer and replace it with a reserved one */ 2651 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, 2652 struct hns3_desc_cb *res_cb) 2653 { 2654 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2655 ring->desc_cb[i] = *res_cb; 2656 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2657 ring->desc[i].rx.bd_base_info = 0; 2658 } 2659 2660 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) 2661 { 2662 ring->desc_cb[i].reuse_flag = 0; 2663 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + 2664 ring->desc_cb[i].page_offset); 2665 ring->desc[i].rx.bd_base_info = 0; 2666 2667 dma_sync_single_for_device(ring_to_dev(ring), 2668 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, 2669 hns3_buf_size(ring), 2670 DMA_FROM_DEVICE); 2671 } 2672 2673 static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, 2674 int *bytes, int *pkts, int budget) 2675 { 2676 /* pair with ring->last_to_use update in hns3_tx_doorbell(), 2677 * smp_store_release() is not used in hns3_tx_doorbell() because 2678 * the doorbell operation already has the needed barrier. 2679 */ 2680 int ltu = smp_load_acquire(&ring->last_to_use); 2681 int ntc = ring->next_to_clean; 2682 struct hns3_desc_cb *desc_cb; 2683 bool reclaimed = false; 2684 struct hns3_desc *desc; 2685 2686 while (ltu != ntc) { 2687 desc = &ring->desc[ntc]; 2688 2689 if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & 2690 BIT(HNS3_TXD_VLD_B)) 2691 break; 2692 2693 desc_cb = &ring->desc_cb[ntc]; 2694 (*pkts) += (desc_cb->type == DESC_TYPE_SKB); 2695 (*bytes) += desc_cb->length; 2696 /* desc_cb will be cleaned after hns3_free_buffer_detach */ 2697 hns3_free_buffer_detach(ring, ntc, budget); 2698 2699 if (++ntc == ring->desc_num) 2700 ntc = 0; 2701 2702 /* Issue prefetch for next Tx descriptor */ 2703 prefetch(&ring->desc_cb[ntc]); 2704 reclaimed = true; 2705 } 2706 2707 if (unlikely(!reclaimed)) 2708 return false; 2709 2710 /* This smp_store_release() pairs with smp_load_acquire() in 2711 * ring_space called by hns3_nic_net_xmit.
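 * The release ordering also guarantees that the buffer cleanup above is
 * visible before the xmit path sees the updated next_to_clean.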
2712 */ 2713 smp_store_release(&ring->next_to_clean, ntc); 2714 return true; 2715 } 2716 2717 void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) 2718 { 2719 struct net_device *netdev = ring_to_netdev(ring); 2720 struct hns3_nic_priv *priv = netdev_priv(netdev); 2721 struct netdev_queue *dev_queue; 2722 int bytes, pkts; 2723 2724 bytes = 0; 2725 pkts = 0; 2726 2727 if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget))) 2728 return; 2729 2730 ring->tqp_vector->tx_group.total_bytes += bytes; 2731 ring->tqp_vector->tx_group.total_packets += pkts; 2732 2733 u64_stats_update_begin(&ring->syncp); 2734 ring->stats.tx_bytes += bytes; 2735 ring->stats.tx_pkts += pkts; 2736 u64_stats_update_end(&ring->syncp); 2737 2738 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 2739 netdev_tx_completed_queue(dev_queue, pkts, bytes); 2740 2741 if (unlikely(netif_carrier_ok(netdev) && 2742 ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { 2743 /* Make sure that anybody stopping the queue after this 2744 * sees the new next_to_clean. 2745 */ 2746 smp_mb(); 2747 if (netif_tx_queue_stopped(dev_queue) && 2748 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 2749 netif_tx_wake_queue(dev_queue); 2750 ring->stats.restart_queue++; 2751 } 2752 } 2753 } 2754 2755 static int hns3_desc_unused(struct hns3_enet_ring *ring) 2756 { 2757 int ntc = ring->next_to_clean; 2758 int ntu = ring->next_to_use; 2759 2760 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; 2761 } 2762 2763 static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, 2764 int cleand_count) 2765 { 2766 struct hns3_desc_cb *desc_cb; 2767 struct hns3_desc_cb res_cbs; 2768 int i, ret; 2769 2770 for (i = 0; i < cleand_count; i++) { 2771 desc_cb = &ring->desc_cb[ring->next_to_use]; 2772 if (desc_cb->reuse_flag) { 2773 u64_stats_update_begin(&ring->syncp); 2774 ring->stats.reuse_pg_cnt++; 2775 u64_stats_update_end(&ring->syncp); 2776 2777 hns3_reuse_buffer(ring, ring->next_to_use); 2778 } else { 2779 ret = hns3_alloc_and_map_buffer(ring, &res_cbs); 2780 if (ret) { 2781 u64_stats_update_begin(&ring->syncp); 2782 ring->stats.sw_err_cnt++; 2783 u64_stats_update_end(&ring->syncp); 2784 2785 hns3_rl_err(ring_to_netdev(ring), 2786 "alloc rx buffer failed: %d\n", 2787 ret); 2788 break; 2789 } 2790 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 2791 2792 u64_stats_update_begin(&ring->syncp); 2793 ring->stats.non_reuse_pg++; 2794 u64_stats_update_end(&ring->syncp); 2795 } 2796 2797 ring_ptr_move_fw(ring, next_to_use); 2798 } 2799 2800 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 2801 } 2802 2803 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) 2804 { 2805 return (page_count(cb->priv) - cb->pagecnt_bias) == 1; 2806 } 2807 2808 static void hns3_nic_reuse_page(struct sk_buff *skb, int i, 2809 struct hns3_enet_ring *ring, int pull_len, 2810 struct hns3_desc_cb *desc_cb) 2811 { 2812 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 2813 int size = le16_to_cpu(desc->rx.size); 2814 u32 truesize = hns3_buf_size(ring); 2815 2816 desc_cb->pagecnt_bias--; 2817 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 2818 size - pull_len, truesize); 2819 2820 /* Avoid re-using remote and pfmemalloc pages, or the stack is still 2821 * using the page when page_offset rollback to zero, flag default 2822 * unreuse 2823 */ 2824 if (!dev_page_is_reusable(desc_cb->priv) || 2825 (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) { 2826 __page_frag_cache_drain(desc_cb->priv, 
desc_cb->pagecnt_bias); 2827 return; 2828 } 2829 2830 /* Move offset up to the next cache line */ 2831 desc_cb->page_offset += truesize; 2832 2833 if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) { 2834 desc_cb->reuse_flag = 1; 2835 } else if (hns3_can_reuse_page(desc_cb)) { 2836 desc_cb->reuse_flag = 1; 2837 desc_cb->page_offset = 0; 2838 } else if (desc_cb->pagecnt_bias) { 2839 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); 2840 return; 2841 } 2842 2843 if (unlikely(!desc_cb->pagecnt_bias)) { 2844 page_ref_add(desc_cb->priv, USHRT_MAX); 2845 desc_cb->pagecnt_bias = USHRT_MAX; 2846 } 2847 } 2848 2849 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) 2850 { 2851 __be16 type = skb->protocol; 2852 struct tcphdr *th; 2853 int depth = 0; 2854 2855 while (eth_type_vlan(type)) { 2856 struct vlan_hdr *vh; 2857 2858 if ((depth + VLAN_HLEN) > skb_headlen(skb)) 2859 return -EFAULT; 2860 2861 vh = (struct vlan_hdr *)(skb->data + depth); 2862 type = vh->h_vlan_encapsulated_proto; 2863 depth += VLAN_HLEN; 2864 } 2865 2866 skb_set_network_header(skb, depth); 2867 2868 if (type == htons(ETH_P_IP)) { 2869 const struct iphdr *iph = ip_hdr(skb); 2870 2871 depth += sizeof(struct iphdr); 2872 skb_set_transport_header(skb, depth); 2873 th = tcp_hdr(skb); 2874 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, 2875 iph->daddr, 0); 2876 } else if (type == htons(ETH_P_IPV6)) { 2877 const struct ipv6hdr *iph = ipv6_hdr(skb); 2878 2879 depth += sizeof(struct ipv6hdr); 2880 skb_set_transport_header(skb, depth); 2881 th = tcp_hdr(skb); 2882 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, 2883 &iph->daddr, 0); 2884 } else { 2885 hns3_rl_err(skb->dev, 2886 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", 2887 be16_to_cpu(type), depth); 2888 return -EFAULT; 2889 } 2890 2891 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 2892 if (th->cwr) 2893 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 2894 2895 if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) 2896 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; 2897 2898 skb->csum_start = (unsigned char *)th - skb->head; 2899 skb->csum_offset = offsetof(struct tcphdr, check); 2900 skb->ip_summed = CHECKSUM_PARTIAL; 2901 2902 trace_hns3_gro(skb); 2903 2904 return 0; 2905 } 2906 2907 static void hns3_checksum_complete(struct hns3_enet_ring *ring, 2908 struct sk_buff *skb, u32 l234info) 2909 { 2910 u32 lo, hi; 2911 2912 u64_stats_update_begin(&ring->syncp); 2913 ring->stats.csum_complete++; 2914 u64_stats_update_end(&ring->syncp); 2915 skb->ip_summed = CHECKSUM_COMPLETE; 2916 lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M, 2917 HNS3_RXD_L2_CSUM_L_S); 2918 hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M, 2919 HNS3_RXD_L2_CSUM_H_S); 2920 skb->csum = csum_unfold((__force __sum16)(lo | hi << 8)); 2921 } 2922 2923 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, 2924 u32 l234info, u32 bd_base_info, u32 ol_info) 2925 { 2926 struct net_device *netdev = ring_to_netdev(ring); 2927 int l3_type, l4_type; 2928 int ol4_type; 2929 2930 skb->ip_summed = CHECKSUM_NONE; 2931 2932 skb_checksum_none_assert(skb); 2933 2934 if (!(netdev->features & NETIF_F_RXCSUM)) 2935 return; 2936 2937 if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) { 2938 hns3_checksum_complete(ring, skb, l234info); 2939 return; 2940 } 2941 2942 /* check if hardware has done checksum */ 2943 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) 2944 return; 2945 2946 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | 2947 
BIT(HNS3_RXD_OL3E_B) | 2948 BIT(HNS3_RXD_OL4E_B)))) { 2949 u64_stats_update_begin(&ring->syncp); 2950 ring->stats.l3l4_csum_err++; 2951 u64_stats_update_end(&ring->syncp); 2952 2953 return; 2954 } 2955 2956 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, 2957 HNS3_RXD_OL4ID_S); 2958 switch (ol4_type) { 2959 case HNS3_OL4_TYPE_MAC_IN_UDP: 2960 case HNS3_OL4_TYPE_NVGRE: 2961 skb->csum_level = 1; 2962 fallthrough; 2963 case HNS3_OL4_TYPE_NO_TUN: 2964 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, 2965 HNS3_RXD_L3ID_S); 2966 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, 2967 HNS3_RXD_L4ID_S); 2968 2969 /* Can checksum IPv4 or IPv6 + UDP/TCP/SCTP packets */ 2970 if ((l3_type == HNS3_L3_TYPE_IPV4 || 2971 l3_type == HNS3_L3_TYPE_IPV6) && 2972 (l4_type == HNS3_L4_TYPE_UDP || 2973 l4_type == HNS3_L4_TYPE_TCP || 2974 l4_type == HNS3_L4_TYPE_SCTP)) 2975 skb->ip_summed = CHECKSUM_UNNECESSARY; 2976 break; 2977 default: 2978 break; 2979 } 2980 } 2981 2982 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) 2983 { 2984 if (skb_has_frag_list(skb)) 2985 napi_gro_flush(&ring->tqp_vector->napi, false); 2986 2987 napi_gro_receive(&ring->tqp_vector->napi, skb); 2988 } 2989 2990 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, 2991 struct hns3_desc *desc, u32 l234info, 2992 u16 *vlan_tag) 2993 { 2994 struct hnae3_handle *handle = ring->tqp->handle; 2995 struct pci_dev *pdev = ring->tqp->handle->pdev; 2996 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2997 2998 if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) { 2999 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 3000 if (!(*vlan_tag & VLAN_VID_MASK)) 3001 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 3002 3003 return (*vlan_tag != 0); 3004 } 3005 3006 #define HNS3_STRP_OUTER_VLAN 0x1 3007 #define HNS3_STRP_INNER_VLAN 0x2 3008 #define HNS3_STRP_BOTH 0x3 3009 3010 /* Hardware always inserts the stripped VLAN tag into the RX descriptor 3011 * when it removes the tag from the packet, so the driver needs to 3012 * determine which tag to report to the stack.
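 * When a port based VLAN is configured, a single stripped tag belongs to
 * the port and is not reported; only the inner tag of a double-tagged
 * packet is passed up.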
3013 */ 3014 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, 3015 HNS3_RXD_STRP_TAGP_S)) { 3016 case HNS3_STRP_OUTER_VLAN: 3017 if (handle->port_base_vlan_state != 3018 HNAE3_PORT_BASE_VLAN_DISABLE) 3019 return false; 3020 3021 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 3022 return true; 3023 case HNS3_STRP_INNER_VLAN: 3024 if (handle->port_base_vlan_state != 3025 HNAE3_PORT_BASE_VLAN_DISABLE) 3026 return false; 3027 3028 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 3029 return true; 3030 case HNS3_STRP_BOTH: 3031 if (handle->port_base_vlan_state == 3032 HNAE3_PORT_BASE_VLAN_DISABLE) 3033 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 3034 else 3035 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 3036 3037 return true; 3038 default: 3039 return false; 3040 } 3041 } 3042 3043 static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) 3044 { 3045 ring->desc[ring->next_to_clean].rx.bd_base_info &= 3046 cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); 3047 ring->next_to_clean += 1; 3048 3049 if (unlikely(ring->next_to_clean == ring->desc_num)) 3050 ring->next_to_clean = 0; 3051 } 3052 3053 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, 3054 unsigned char *va) 3055 { 3056 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; 3057 struct net_device *netdev = ring_to_netdev(ring); 3058 struct sk_buff *skb; 3059 3060 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); 3061 skb = ring->skb; 3062 if (unlikely(!skb)) { 3063 hns3_rl_err(netdev, "alloc rx skb fail\n"); 3064 3065 u64_stats_update_begin(&ring->syncp); 3066 ring->stats.sw_err_cnt++; 3067 u64_stats_update_end(&ring->syncp); 3068 3069 return -ENOMEM; 3070 } 3071 3072 trace_hns3_rx_desc(ring); 3073 prefetchw(skb->data); 3074 3075 ring->pending_buf = 1; 3076 ring->frag_num = 0; 3077 ring->tail_skb = NULL; 3078 if (length <= HNS3_RX_HEAD_SIZE) { 3079 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 3080 3081 /* We can reuse buffer as-is, just make sure it is reusable */ 3082 if (dev_page_is_reusable(desc_cb->priv)) 3083 desc_cb->reuse_flag = 1; 3084 else /* This page cannot be reused so discard it */ 3085 __page_frag_cache_drain(desc_cb->priv, 3086 desc_cb->pagecnt_bias); 3087 3088 hns3_rx_ring_move_fw(ring); 3089 return 0; 3090 } 3091 u64_stats_update_begin(&ring->syncp); 3092 ring->stats.seg_pkt_cnt++; 3093 u64_stats_update_end(&ring->syncp); 3094 3095 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); 3096 __skb_put(skb, ring->pull_len); 3097 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, 3098 desc_cb); 3099 hns3_rx_ring_move_fw(ring); 3100 3101 return 0; 3102 } 3103 3104 static int hns3_add_frag(struct hns3_enet_ring *ring) 3105 { 3106 struct sk_buff *skb = ring->skb; 3107 struct sk_buff *head_skb = skb; 3108 struct sk_buff *new_skb; 3109 struct hns3_desc_cb *desc_cb; 3110 struct hns3_desc *desc; 3111 u32 bd_base_info; 3112 3113 do { 3114 desc = &ring->desc[ring->next_to_clean]; 3115 desc_cb = &ring->desc_cb[ring->next_to_clean]; 3116 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3117 /* make sure HW write desc complete */ 3118 dma_rmb(); 3119 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) 3120 return -ENXIO; 3121 3122 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { 3123 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); 3124 if (unlikely(!new_skb)) { 3125 hns3_rl_err(ring_to_netdev(ring), 3126 "alloc rx fraglist skb fail\n"); 3127 return -ENXIO; 3128 } 3129 ring->frag_num = 0; 3130 3131 if (ring->tail_skb) { 3132 
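/* chain the new skb after the current tail of the frag list */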
ring->tail_skb->next = new_skb; 3133 ring->tail_skb = new_skb; 3134 } else { 3135 skb_shinfo(skb)->frag_list = new_skb; 3136 ring->tail_skb = new_skb; 3137 } 3138 } 3139 3140 if (ring->tail_skb) { 3141 head_skb->truesize += hns3_buf_size(ring); 3142 head_skb->data_len += le16_to_cpu(desc->rx.size); 3143 head_skb->len += le16_to_cpu(desc->rx.size); 3144 skb = ring->tail_skb; 3145 } 3146 3147 dma_sync_single_for_cpu(ring_to_dev(ring), 3148 desc_cb->dma + desc_cb->page_offset, 3149 hns3_buf_size(ring), 3150 DMA_FROM_DEVICE); 3151 3152 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); 3153 trace_hns3_rx_desc(ring); 3154 hns3_rx_ring_move_fw(ring); 3155 ring->pending_buf++; 3156 } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); 3157 3158 return 0; 3159 } 3160 3161 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, 3162 struct sk_buff *skb, u32 l234info, 3163 u32 bd_base_info, u32 ol_info) 3164 { 3165 u32 l3_type; 3166 3167 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, 3168 HNS3_RXD_GRO_SIZE_M, 3169 HNS3_RXD_GRO_SIZE_S); 3170 /* if there is no HW GRO, do not set gro params */ 3171 if (!skb_shinfo(skb)->gso_size) { 3172 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info); 3173 return 0; 3174 } 3175 3176 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, 3177 HNS3_RXD_GRO_COUNT_M, 3178 HNS3_RXD_GRO_COUNT_S); 3179 3180 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); 3181 if (l3_type == HNS3_L3_TYPE_IPV4) 3182 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 3183 else if (l3_type == HNS3_L3_TYPE_IPV6) 3184 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 3185 else 3186 return -EFAULT; 3187 3188 return hns3_gro_complete(skb, l234info); 3189 } 3190 3191 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, 3192 struct sk_buff *skb, u32 rss_hash) 3193 { 3194 struct hnae3_handle *handle = ring->tqp->handle; 3195 enum pkt_hash_types rss_type; 3196 3197 if (rss_hash) 3198 rss_type = handle->kinfo.rss_type; 3199 else 3200 rss_type = PKT_HASH_TYPE_NONE; 3201 3202 skb_set_hash(skb, rss_hash, rss_type); 3203 } 3204 3205 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) 3206 { 3207 struct net_device *netdev = ring_to_netdev(ring); 3208 enum hns3_pkt_l2t_type l2_frame_type; 3209 u32 bd_base_info, l234info, ol_info; 3210 struct hns3_desc *desc; 3211 unsigned int len; 3212 int pre_ntc, ret; 3213 3214 /* bdinfo handled below is only valid on the last BD of the 3215 * current packet, and ring->next_to_clean indicates the first 3216 * descriptor of next packet, so need - 1 below. 3217 */ 3218 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : 3219 (ring->desc_num - 1); 3220 desc = &ring->desc[pre_ntc]; 3221 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3222 l234info = le32_to_cpu(desc->rx.l234_info); 3223 ol_info = le32_to_cpu(desc->rx.ol_info); 3224 3225 /* Based on hw strategy, the tag offloaded will be stored at 3226 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 3227 * in one layer tag case. 
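 * hns3_parse_vlan_tag() below decides which of the two tags, if any, is
 * reported.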
3228 */ 3229 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 3230 u16 vlan_tag; 3231 3232 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) 3233 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3234 vlan_tag); 3235 } 3236 3237 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | 3238 BIT(HNS3_RXD_L2E_B))))) { 3239 u64_stats_update_begin(&ring->syncp); 3240 if (l234info & BIT(HNS3_RXD_L2E_B)) 3241 ring->stats.l2_err++; 3242 else 3243 ring->stats.err_pkt_len++; 3244 u64_stats_update_end(&ring->syncp); 3245 3246 return -EFAULT; 3247 } 3248 3249 len = skb->len; 3250 3251 /* Do update ip stack process */ 3252 skb->protocol = eth_type_trans(skb, netdev); 3253 3254 /* This is needed in order to enable forwarding support */ 3255 ret = hns3_set_gro_and_checksum(ring, skb, l234info, 3256 bd_base_info, ol_info); 3257 if (unlikely(ret)) { 3258 u64_stats_update_begin(&ring->syncp); 3259 ring->stats.rx_err_cnt++; 3260 u64_stats_update_end(&ring->syncp); 3261 return ret; 3262 } 3263 3264 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, 3265 HNS3_RXD_DMAC_S); 3266 3267 u64_stats_update_begin(&ring->syncp); 3268 ring->stats.rx_pkts++; 3269 ring->stats.rx_bytes += len; 3270 3271 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) 3272 ring->stats.rx_multicast++; 3273 3274 u64_stats_update_end(&ring->syncp); 3275 3276 ring->tqp_vector->rx_group.total_bytes += len; 3277 3278 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); 3279 return 0; 3280 } 3281 3282 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) 3283 { 3284 struct sk_buff *skb = ring->skb; 3285 struct hns3_desc_cb *desc_cb; 3286 struct hns3_desc *desc; 3287 unsigned int length; 3288 u32 bd_base_info; 3289 int ret; 3290 3291 desc = &ring->desc[ring->next_to_clean]; 3292 desc_cb = &ring->desc_cb[ring->next_to_clean]; 3293 3294 prefetch(desc); 3295 3296 if (!skb) { 3297 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3298 3299 /* Check valid BD */ 3300 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) 3301 return -ENXIO; 3302 3303 dma_rmb(); 3304 length = le16_to_cpu(desc->rx.size); 3305 3306 ring->va = desc_cb->buf + desc_cb->page_offset; 3307 3308 dma_sync_single_for_cpu(ring_to_dev(ring), 3309 desc_cb->dma + desc_cb->page_offset, 3310 hns3_buf_size(ring), 3311 DMA_FROM_DEVICE); 3312 3313 /* Prefetch first cache line of first page. 3314 * Idea is to cache few bytes of the header of the packet. 3315 * Our L1 Cache line size is 64B so need to prefetch twice to make 3316 * it 128B. But in actual we can have greater size of caches with 3317 * 128B Level 1 cache lines. In such a case, single fetch would 3318 * suffice to cache in the relevant part of the header. 
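 * net_prefetch() covers 128 bytes regardless of the L1 cache line size.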
3319 */ 3320 net_prefetch(ring->va); 3321 3322 ret = hns3_alloc_skb(ring, length, ring->va); 3323 skb = ring->skb; 3324 3325 if (ret < 0) /* alloc buffer fail */ 3326 return ret; 3327 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */ 3328 ret = hns3_add_frag(ring); 3329 if (ret) 3330 return ret; 3331 } 3332 } else { 3333 ret = hns3_add_frag(ring); 3334 if (ret) 3335 return ret; 3336 } 3337 3338 /* As the head data may be changed when GRO enable, copy 3339 * the head data in after other data rx completed 3340 */ 3341 if (skb->len > HNS3_RX_HEAD_SIZE) 3342 memcpy(skb->data, ring->va, 3343 ALIGN(ring->pull_len, sizeof(long))); 3344 3345 ret = hns3_handle_bdinfo(ring, skb); 3346 if (unlikely(ret)) { 3347 dev_kfree_skb_any(skb); 3348 return ret; 3349 } 3350 3351 skb_record_rx_queue(skb, ring->tqp->tqp_index); 3352 return 0; 3353 } 3354 3355 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, 3356 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) 3357 { 3358 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 3359 int unused_count = hns3_desc_unused(ring); 3360 int recv_pkts = 0; 3361 int err; 3362 3363 unused_count -= ring->pending_buf; 3364 3365 while (recv_pkts < budget) { 3366 /* Reuse or realloc buffers */ 3367 if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { 3368 hns3_nic_alloc_rx_buffers(ring, unused_count); 3369 unused_count = hns3_desc_unused(ring) - 3370 ring->pending_buf; 3371 } 3372 3373 /* Poll one pkt */ 3374 err = hns3_handle_rx_bd(ring); 3375 /* Do not get FE for the packet or failed to alloc skb */ 3376 if (unlikely(!ring->skb || err == -ENXIO)) { 3377 goto out; 3378 } else if (likely(!err)) { 3379 rx_fn(ring, ring->skb); 3380 recv_pkts++; 3381 } 3382 3383 unused_count += ring->pending_buf; 3384 ring->skb = NULL; 3385 ring->pending_buf = 0; 3386 } 3387 3388 out: 3389 /* Make all data has been write before submit */ 3390 if (unused_count > 0) 3391 hns3_nic_alloc_rx_buffers(ring, unused_count); 3392 3393 return recv_pkts; 3394 } 3395 3396 static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group) 3397 { 3398 #define HNS3_RX_LOW_BYTE_RATE 10000 3399 #define HNS3_RX_MID_BYTE_RATE 20000 3400 #define HNS3_RX_ULTRA_PACKET_RATE 40 3401 3402 enum hns3_flow_level_range new_flow_level; 3403 struct hns3_enet_tqp_vector *tqp_vector; 3404 int packets_per_msecs, bytes_per_msecs; 3405 u32 time_passed_ms; 3406 3407 tqp_vector = ring_group->ring->tqp_vector; 3408 time_passed_ms = 3409 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); 3410 if (!time_passed_ms) 3411 return false; 3412 3413 do_div(ring_group->total_packets, time_passed_ms); 3414 packets_per_msecs = ring_group->total_packets; 3415 3416 do_div(ring_group->total_bytes, time_passed_ms); 3417 bytes_per_msecs = ring_group->total_bytes; 3418 3419 new_flow_level = ring_group->coal.flow_level; 3420 3421 /* Simple throttlerate management 3422 * 0-10MB/s lower (50000 ints/s) 3423 * 10-20MB/s middle (20000 ints/s) 3424 * 20-1249MB/s high (18000 ints/s) 3425 * > 40000pps ultra (8000 ints/s) 3426 */ 3427 switch (new_flow_level) { 3428 case HNS3_FLOW_LOW: 3429 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) 3430 new_flow_level = HNS3_FLOW_MID; 3431 break; 3432 case HNS3_FLOW_MID: 3433 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) 3434 new_flow_level = HNS3_FLOW_HIGH; 3435 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) 3436 new_flow_level = HNS3_FLOW_LOW; 3437 break; 3438 case HNS3_FLOW_HIGH: 3439 case HNS3_FLOW_ULTRA: 3440 default: 3441 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) 3442 new_flow_level = 
HNS3_FLOW_MID; 3443 break; 3444 } 3445 3446 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && 3447 &tqp_vector->rx_group == ring_group) 3448 new_flow_level = HNS3_FLOW_ULTRA; 3449 3450 ring_group->total_bytes = 0; 3451 ring_group->total_packets = 0; 3452 ring_group->coal.flow_level = new_flow_level; 3453 3454 return true; 3455 } 3456 3457 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) 3458 { 3459 struct hns3_enet_tqp_vector *tqp_vector; 3460 u16 new_int_gl; 3461 3462 if (!ring_group->ring) 3463 return false; 3464 3465 tqp_vector = ring_group->ring->tqp_vector; 3466 if (!tqp_vector->last_jiffies) 3467 return false; 3468 3469 if (ring_group->total_packets == 0) { 3470 ring_group->coal.int_gl = HNS3_INT_GL_50K; 3471 ring_group->coal.flow_level = HNS3_FLOW_LOW; 3472 return true; 3473 } 3474 3475 if (!hns3_get_new_flow_lvl(ring_group)) 3476 return false; 3477 3478 new_int_gl = ring_group->coal.int_gl; 3479 switch (ring_group->coal.flow_level) { 3480 case HNS3_FLOW_LOW: 3481 new_int_gl = HNS3_INT_GL_50K; 3482 break; 3483 case HNS3_FLOW_MID: 3484 new_int_gl = HNS3_INT_GL_20K; 3485 break; 3486 case HNS3_FLOW_HIGH: 3487 new_int_gl = HNS3_INT_GL_18K; 3488 break; 3489 case HNS3_FLOW_ULTRA: 3490 new_int_gl = HNS3_INT_GL_8K; 3491 break; 3492 default: 3493 break; 3494 } 3495 3496 if (new_int_gl != ring_group->coal.int_gl) { 3497 ring_group->coal.int_gl = new_int_gl; 3498 return true; 3499 } 3500 return false; 3501 } 3502 3503 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) 3504 { 3505 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 3506 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 3507 bool rx_update, tx_update; 3508 3509 /* update param every 1000ms */ 3510 if (time_before(jiffies, 3511 tqp_vector->last_jiffies + msecs_to_jiffies(1000))) 3512 return; 3513 3514 if (rx_group->coal.adapt_enable) { 3515 rx_update = hns3_get_new_int_gl(rx_group); 3516 if (rx_update) 3517 hns3_set_vector_coalesce_rx_gl(tqp_vector, 3518 rx_group->coal.int_gl); 3519 } 3520 3521 if (tx_group->coal.adapt_enable) { 3522 tx_update = hns3_get_new_int_gl(tx_group); 3523 if (tx_update) 3524 hns3_set_vector_coalesce_tx_gl(tqp_vector, 3525 tx_group->coal.int_gl); 3526 } 3527 3528 tqp_vector->last_jiffies = jiffies; 3529 } 3530 3531 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 3532 { 3533 struct hns3_nic_priv *priv = netdev_priv(napi->dev); 3534 struct hns3_enet_ring *ring; 3535 int rx_pkt_total = 0; 3536 3537 struct hns3_enet_tqp_vector *tqp_vector = 3538 container_of(napi, struct hns3_enet_tqp_vector, napi); 3539 bool clean_complete = true; 3540 int rx_budget = budget; 3541 3542 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3543 napi_complete(napi); 3544 return 0; 3545 } 3546 3547 /* Since the actual Tx work is minimal, we can give the Tx a larger 3548 * budget and be more aggressive about cleaning up the Tx descriptors. 
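 * The full NAPI budget is therefore given to every Tx ring, while the
 * Rx rings share it below.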
3549 */ 3550 hns3_for_each_ring(ring, tqp_vector->tx_group) 3551 hns3_clean_tx_ring(ring, budget); 3552 3553 /* make sure rx ring budget not smaller than 1 */ 3554 if (tqp_vector->num_tqps > 1) 3555 rx_budget = max(budget / tqp_vector->num_tqps, 1); 3556 3557 hns3_for_each_ring(ring, tqp_vector->rx_group) { 3558 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 3559 hns3_rx_skb); 3560 3561 if (rx_cleaned >= rx_budget) 3562 clean_complete = false; 3563 3564 rx_pkt_total += rx_cleaned; 3565 } 3566 3567 tqp_vector->rx_group.total_packets += rx_pkt_total; 3568 3569 if (!clean_complete) 3570 return budget; 3571 3572 if (napi_complete(napi) && 3573 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3574 hns3_update_new_int_gl(tqp_vector); 3575 hns3_mask_vector_irq(tqp_vector, 1); 3576 } 3577 3578 return rx_pkt_total; 3579 } 3580 3581 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3582 struct hnae3_ring_chain_node *head) 3583 { 3584 struct pci_dev *pdev = tqp_vector->handle->pdev; 3585 struct hnae3_ring_chain_node *cur_chain = head; 3586 struct hnae3_ring_chain_node *chain; 3587 struct hns3_enet_ring *tx_ring; 3588 struct hns3_enet_ring *rx_ring; 3589 3590 tx_ring = tqp_vector->tx_group.ring; 3591 if (tx_ring) { 3592 cur_chain->tqp_index = tx_ring->tqp->tqp_index; 3593 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3594 HNAE3_RING_TYPE_TX); 3595 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3596 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); 3597 3598 cur_chain->next = NULL; 3599 3600 while (tx_ring->next) { 3601 tx_ring = tx_ring->next; 3602 3603 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), 3604 GFP_KERNEL); 3605 if (!chain) 3606 goto err_free_chain; 3607 3608 cur_chain->next = chain; 3609 chain->tqp_index = tx_ring->tqp->tqp_index; 3610 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3611 HNAE3_RING_TYPE_TX); 3612 hnae3_set_field(chain->int_gl_idx, 3613 HNAE3_RING_GL_IDX_M, 3614 HNAE3_RING_GL_IDX_S, 3615 HNAE3_RING_GL_TX); 3616 3617 cur_chain = chain; 3618 } 3619 } 3620 3621 rx_ring = tqp_vector->rx_group.ring; 3622 if (!tx_ring && rx_ring) { 3623 cur_chain->next = NULL; 3624 cur_chain->tqp_index = rx_ring->tqp->tqp_index; 3625 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3626 HNAE3_RING_TYPE_RX); 3627 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3628 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3629 3630 rx_ring = rx_ring->next; 3631 } 3632 3633 while (rx_ring) { 3634 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 3635 if (!chain) 3636 goto err_free_chain; 3637 3638 cur_chain->next = chain; 3639 chain->tqp_index = rx_ring->tqp->tqp_index; 3640 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3641 HNAE3_RING_TYPE_RX); 3642 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3643 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3644 3645 cur_chain = chain; 3646 3647 rx_ring = rx_ring->next; 3648 } 3649 3650 return 0; 3651 3652 err_free_chain: 3653 cur_chain = head->next; 3654 while (cur_chain) { 3655 chain = cur_chain->next; 3656 devm_kfree(&pdev->dev, cur_chain); 3657 cur_chain = chain; 3658 } 3659 head->next = NULL; 3660 3661 return -ENOMEM; 3662 } 3663 3664 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3665 struct hnae3_ring_chain_node *head) 3666 { 3667 struct pci_dev *pdev = tqp_vector->handle->pdev; 3668 struct hnae3_ring_chain_node *chain_tmp, *chain; 3669 3670 chain = head->next; 3671 3672 while (chain) { 3673 chain_tmp = chain->next; 3674 devm_kfree(&pdev->dev, chain); 3675 chain 
= chain_tmp; 3676 } 3677 } 3678 3679 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 3680 struct hns3_enet_ring *ring) 3681 { 3682 ring->next = group->ring; 3683 group->ring = ring; 3684 3685 group->count++; 3686 } 3687 3688 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) 3689 { 3690 struct pci_dev *pdev = priv->ae_handle->pdev; 3691 struct hns3_enet_tqp_vector *tqp_vector; 3692 int num_vectors = priv->vector_num; 3693 int numa_node; 3694 int vector_i; 3695 3696 numa_node = dev_to_node(&pdev->dev); 3697 3698 for (vector_i = 0; vector_i < num_vectors; vector_i++) { 3699 tqp_vector = &priv->tqp_vector[vector_i]; 3700 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), 3701 &tqp_vector->affinity_mask); 3702 } 3703 } 3704 3705 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 3706 { 3707 struct hnae3_ring_chain_node vector_ring_chain; 3708 struct hnae3_handle *h = priv->ae_handle; 3709 struct hns3_enet_tqp_vector *tqp_vector; 3710 int ret; 3711 int i; 3712 3713 hns3_nic_set_cpumask(priv); 3714 3715 for (i = 0; i < priv->vector_num; i++) { 3716 tqp_vector = &priv->tqp_vector[i]; 3717 hns3_vector_coalesce_init_hw(tqp_vector, priv); 3718 tqp_vector->num_tqps = 0; 3719 } 3720 3721 for (i = 0; i < h->kinfo.num_tqps; i++) { 3722 u16 vector_i = i % priv->vector_num; 3723 u16 tqp_num = h->kinfo.num_tqps; 3724 3725 tqp_vector = &priv->tqp_vector[vector_i]; 3726 3727 hns3_add_ring_to_group(&tqp_vector->tx_group, 3728 &priv->ring[i]); 3729 3730 hns3_add_ring_to_group(&tqp_vector->rx_group, 3731 &priv->ring[i + tqp_num]); 3732 3733 priv->ring[i].tqp_vector = tqp_vector; 3734 priv->ring[i + tqp_num].tqp_vector = tqp_vector; 3735 tqp_vector->num_tqps++; 3736 } 3737 3738 for (i = 0; i < priv->vector_num; i++) { 3739 tqp_vector = &priv->tqp_vector[i]; 3740 3741 tqp_vector->rx_group.total_bytes = 0; 3742 tqp_vector->rx_group.total_packets = 0; 3743 tqp_vector->tx_group.total_bytes = 0; 3744 tqp_vector->tx_group.total_packets = 0; 3745 tqp_vector->handle = h; 3746 3747 ret = hns3_get_vector_ring_chain(tqp_vector, 3748 &vector_ring_chain); 3749 if (ret) 3750 goto map_ring_fail; 3751 3752 ret = h->ae_algo->ops->map_ring_to_vector(h, 3753 tqp_vector->vector_irq, &vector_ring_chain); 3754 3755 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3756 3757 if (ret) 3758 goto map_ring_fail; 3759 3760 netif_napi_add(priv->netdev, &tqp_vector->napi, 3761 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 3762 } 3763 3764 return 0; 3765 3766 map_ring_fail: 3767 while (i--) 3768 netif_napi_del(&priv->tqp_vector[i].napi); 3769 3770 return ret; 3771 } 3772 3773 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 3774 { 3775 struct hnae3_handle *h = priv->ae_handle; 3776 struct hns3_enet_tqp_vector *tqp_vector; 3777 struct hnae3_vector_info *vector; 3778 struct pci_dev *pdev = h->pdev; 3779 u16 tqp_num = h->kinfo.num_tqps; 3780 u16 vector_num; 3781 int ret = 0; 3782 u16 i; 3783 3784 /* RSS size, cpu online and vector_num should be the same */ 3785 /* Should consider 2p/4p later */ 3786 vector_num = min_t(u16, num_online_cpus(), tqp_num); 3787 3788 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), 3789 GFP_KERNEL); 3790 if (!vector) 3791 return -ENOMEM; 3792 3793 /* save the actual available vector number */ 3794 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 3795 3796 priv->vector_num = vector_num; 3797 priv->tqp_vector = (struct hns3_enet_tqp_vector *) 3798 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 
3799 GFP_KERNEL); 3800 if (!priv->tqp_vector) { 3801 ret = -ENOMEM; 3802 goto out; 3803 } 3804 3805 for (i = 0; i < priv->vector_num; i++) { 3806 tqp_vector = &priv->tqp_vector[i]; 3807 tqp_vector->idx = i; 3808 tqp_vector->mask_addr = vector[i].io_addr; 3809 tqp_vector->vector_irq = vector[i].vector; 3810 hns3_vector_coalesce_init(tqp_vector, priv); 3811 } 3812 3813 out: 3814 devm_kfree(&pdev->dev, vector); 3815 return ret; 3816 } 3817 3818 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) 3819 { 3820 group->ring = NULL; 3821 group->count = 0; 3822 } 3823 3824 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 3825 { 3826 struct hnae3_ring_chain_node vector_ring_chain; 3827 struct hnae3_handle *h = priv->ae_handle; 3828 struct hns3_enet_tqp_vector *tqp_vector; 3829 int i; 3830 3831 for (i = 0; i < priv->vector_num; i++) { 3832 tqp_vector = &priv->tqp_vector[i]; 3833 3834 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) 3835 continue; 3836 3837 /* Since the mapping can be overwritten, when fail to get the 3838 * chain between vector and ring, we should go on to deal with 3839 * the remaining options. 3840 */ 3841 if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) 3842 dev_warn(priv->dev, "failed to get ring chain\n"); 3843 3844 h->ae_algo->ops->unmap_ring_from_vector(h, 3845 tqp_vector->vector_irq, &vector_ring_chain); 3846 3847 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3848 3849 hns3_clear_ring_group(&tqp_vector->rx_group); 3850 hns3_clear_ring_group(&tqp_vector->tx_group); 3851 netif_napi_del(&priv->tqp_vector[i].napi); 3852 } 3853 } 3854 3855 static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 3856 { 3857 struct hnae3_handle *h = priv->ae_handle; 3858 struct pci_dev *pdev = h->pdev; 3859 int i, ret; 3860 3861 for (i = 0; i < priv->vector_num; i++) { 3862 struct hns3_enet_tqp_vector *tqp_vector; 3863 3864 tqp_vector = &priv->tqp_vector[i]; 3865 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 3866 if (ret) 3867 return; 3868 } 3869 3870 devm_kfree(&pdev->dev, priv->tqp_vector); 3871 } 3872 3873 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 3874 unsigned int ring_type) 3875 { 3876 int queue_num = priv->ae_handle->kinfo.num_tqps; 3877 struct hns3_enet_ring *ring; 3878 int desc_num; 3879 3880 if (ring_type == HNAE3_RING_TYPE_TX) { 3881 ring = &priv->ring[q->tqp_index]; 3882 desc_num = priv->ae_handle->kinfo.num_tx_desc; 3883 ring->queue_index = q->tqp_index; 3884 } else { 3885 ring = &priv->ring[q->tqp_index + queue_num]; 3886 desc_num = priv->ae_handle->kinfo.num_rx_desc; 3887 ring->queue_index = q->tqp_index; 3888 } 3889 3890 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 3891 3892 ring->tqp = q; 3893 ring->desc = NULL; 3894 ring->desc_cb = NULL; 3895 ring->dev = priv->dev; 3896 ring->desc_dma_addr = 0; 3897 ring->buf_size = q->buf_size; 3898 ring->desc_num = desc_num; 3899 ring->next_to_use = 0; 3900 ring->next_to_clean = 0; 3901 ring->last_to_use = 0; 3902 } 3903 3904 static void hns3_queue_to_ring(struct hnae3_queue *tqp, 3905 struct hns3_nic_priv *priv) 3906 { 3907 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 3908 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 3909 } 3910 3911 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 3912 { 3913 struct hnae3_handle *h = priv->ae_handle; 3914 struct pci_dev *pdev = h->pdev; 3915 int i; 3916 3917 priv->ring = devm_kzalloc(&pdev->dev, 3918 array3_size(h->kinfo.num_tqps, 3919 
sizeof(*priv->ring), 2), 3920 GFP_KERNEL); 3921 if (!priv->ring) 3922 return -ENOMEM; 3923 3924 for (i = 0; i < h->kinfo.num_tqps; i++) 3925 hns3_queue_to_ring(h->kinfo.tqp[i], priv); 3926 3927 return 0; 3928 } 3929 3930 static void hns3_put_ring_config(struct hns3_nic_priv *priv) 3931 { 3932 if (!priv->ring) 3933 return; 3934 3935 devm_kfree(priv->dev, priv->ring); 3936 priv->ring = NULL; 3937 } 3938 3939 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) 3940 { 3941 int ret; 3942 3943 if (ring->desc_num <= 0 || ring->buf_size <= 0) 3944 return -EINVAL; 3945 3946 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, 3947 sizeof(ring->desc_cb[0]), GFP_KERNEL); 3948 if (!ring->desc_cb) { 3949 ret = -ENOMEM; 3950 goto out; 3951 } 3952 3953 ret = hns3_alloc_desc(ring); 3954 if (ret) 3955 goto out_with_desc_cb; 3956 3957 if (!HNAE3_IS_TX_RING(ring)) { 3958 ret = hns3_alloc_ring_buffers(ring); 3959 if (ret) 3960 goto out_with_desc; 3961 } 3962 3963 return 0; 3964 3965 out_with_desc: 3966 hns3_free_desc(ring); 3967 out_with_desc_cb: 3968 devm_kfree(ring_to_dev(ring), ring->desc_cb); 3969 ring->desc_cb = NULL; 3970 out: 3971 return ret; 3972 } 3973 3974 void hns3_fini_ring(struct hns3_enet_ring *ring) 3975 { 3976 hns3_free_desc(ring); 3977 devm_kfree(ring_to_dev(ring), ring->desc_cb); 3978 ring->desc_cb = NULL; 3979 ring->next_to_clean = 0; 3980 ring->next_to_use = 0; 3981 ring->last_to_use = 0; 3982 ring->pending_buf = 0; 3983 if (ring->skb) { 3984 dev_kfree_skb_any(ring->skb); 3985 ring->skb = NULL; 3986 } 3987 } 3988 3989 static int hns3_buf_size2type(u32 buf_size) 3990 { 3991 int bd_size_type; 3992 3993 switch (buf_size) { 3994 case 512: 3995 bd_size_type = HNS3_BD_SIZE_512_TYPE; 3996 break; 3997 case 1024: 3998 bd_size_type = HNS3_BD_SIZE_1024_TYPE; 3999 break; 4000 case 2048: 4001 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 4002 break; 4003 case 4096: 4004 bd_size_type = HNS3_BD_SIZE_4096_TYPE; 4005 break; 4006 default: 4007 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 4008 } 4009 4010 return bd_size_type; 4011 } 4012 4013 static void hns3_init_ring_hw(struct hns3_enet_ring *ring) 4014 { 4015 dma_addr_t dma = ring->desc_dma_addr; 4016 struct hnae3_queue *q = ring->tqp; 4017 4018 if (!HNAE3_IS_TX_RING(ring)) { 4019 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma); 4020 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, 4021 (u32)((dma >> 31) >> 1)); 4022 4023 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, 4024 hns3_buf_size2type(ring->buf_size)); 4025 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, 4026 ring->desc_num / 8 - 1); 4027 4028 } else { 4029 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, 4030 (u32)dma); 4031 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 4032 (u32)((dma >> 31) >> 1)); 4033 4034 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 4035 ring->desc_num / 8 - 1); 4036 } 4037 } 4038 4039 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) 4040 { 4041 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 4042 struct hnae3_tc_info *tc_info = &kinfo->tc_info; 4043 int i; 4044 4045 for (i = 0; i < HNAE3_MAX_TC; i++) { 4046 int j; 4047 4048 if (!test_bit(i, &tc_info->tc_en)) 4049 continue; 4050 4051 for (j = 0; j < tc_info->tqp_count[i]; j++) { 4052 struct hnae3_queue *q; 4053 4054 q = priv->ring[tc_info->tqp_offset[i] + j].tqp; 4055 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i); 4056 } 4057 } 4058 } 4059 4060 int hns3_init_all_ring(struct hns3_nic_priv *priv) 4061 { 4062 struct hnae3_handle *h = priv->ae_handle; 4063 int 
ring_num = h->kinfo.num_tqps * 2; 4064 int i, j; 4065 int ret; 4066 4067 for (i = 0; i < ring_num; i++) { 4068 ret = hns3_alloc_ring_memory(&priv->ring[i]); 4069 if (ret) { 4070 dev_err(priv->dev, 4071 "Alloc ring memory fail! ret=%d\n", ret); 4072 goto out_when_alloc_ring_memory; 4073 } 4074 4075 u64_stats_init(&priv->ring[i].syncp); 4076 } 4077 4078 return 0; 4079 4080 out_when_alloc_ring_memory: 4081 for (j = i - 1; j >= 0; j--) 4082 hns3_fini_ring(&priv->ring[j]); 4083 4084 return -ENOMEM; 4085 } 4086 4087 static void hns3_uninit_all_ring(struct hns3_nic_priv *priv) 4088 { 4089 struct hnae3_handle *h = priv->ae_handle; 4090 int i; 4091 4092 for (i = 0; i < h->kinfo.num_tqps; i++) { 4093 hns3_fini_ring(&priv->ring[i]); 4094 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); 4095 } 4096 } 4097 4098 /* Set mac addr if it is configured. or leave it to the AE driver */ 4099 static int hns3_init_mac_addr(struct net_device *netdev) 4100 { 4101 struct hns3_nic_priv *priv = netdev_priv(netdev); 4102 struct hnae3_handle *h = priv->ae_handle; 4103 u8 mac_addr_temp[ETH_ALEN]; 4104 int ret = 0; 4105 4106 if (h->ae_algo->ops->get_mac_addr) 4107 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 4108 4109 /* Check if the MAC address is valid, if not get a random one */ 4110 if (!is_valid_ether_addr(mac_addr_temp)) { 4111 eth_hw_addr_random(netdev); 4112 dev_warn(priv->dev, "using random MAC address %pM\n", 4113 netdev->dev_addr); 4114 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { 4115 ether_addr_copy(netdev->dev_addr, mac_addr_temp); 4116 ether_addr_copy(netdev->perm_addr, mac_addr_temp); 4117 } else { 4118 return 0; 4119 } 4120 4121 if (h->ae_algo->ops->set_mac_addr) 4122 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 4123 4124 return ret; 4125 } 4126 4127 static int hns3_init_phy(struct net_device *netdev) 4128 { 4129 struct hnae3_handle *h = hns3_get_handle(netdev); 4130 int ret = 0; 4131 4132 if (h->ae_algo->ops->mac_connect_phy) 4133 ret = h->ae_algo->ops->mac_connect_phy(h); 4134 4135 return ret; 4136 } 4137 4138 static void hns3_uninit_phy(struct net_device *netdev) 4139 { 4140 struct hnae3_handle *h = hns3_get_handle(netdev); 4141 4142 if (h->ae_algo->ops->mac_disconnect_phy) 4143 h->ae_algo->ops->mac_disconnect_phy(h); 4144 } 4145 4146 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) 4147 { 4148 struct hnae3_handle *h = hns3_get_handle(netdev); 4149 4150 if (h->ae_algo->ops->del_all_fd_entries) 4151 h->ae_algo->ops->del_all_fd_entries(h, clear_list); 4152 } 4153 4154 static int hns3_client_start(struct hnae3_handle *handle) 4155 { 4156 if (!handle->ae_algo->ops->client_start) 4157 return 0; 4158 4159 return handle->ae_algo->ops->client_start(handle); 4160 } 4161 4162 static void hns3_client_stop(struct hnae3_handle *handle) 4163 { 4164 if (!handle->ae_algo->ops->client_stop) 4165 return; 4166 4167 handle->ae_algo->ops->client_stop(handle); 4168 } 4169 4170 static void hns3_info_show(struct hns3_nic_priv *priv) 4171 { 4172 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 4173 4174 dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); 4175 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); 4176 dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); 4177 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); 4178 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); 4179 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); 4180 
dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); 4181 dev_info(priv->dev, "Total number of enabled TCs: %u\n", 4182 kinfo->tc_info.num_tc); 4183 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); 4184 } 4185 4186 static int hns3_client_init(struct hnae3_handle *handle) 4187 { 4188 struct pci_dev *pdev = handle->pdev; 4189 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 4190 u16 alloc_tqps, max_rss_size; 4191 struct hns3_nic_priv *priv; 4192 struct net_device *netdev; 4193 int ret; 4194 4195 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, 4196 &max_rss_size); 4197 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); 4198 if (!netdev) 4199 return -ENOMEM; 4200 4201 priv = netdev_priv(netdev); 4202 priv->dev = &pdev->dev; 4203 priv->netdev = netdev; 4204 priv->ae_handle = handle; 4205 priv->tx_timeout_count = 0; 4206 priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; 4207 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 4208 4209 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); 4210 4211 handle->kinfo.netdev = netdev; 4212 handle->priv = (void *)priv; 4213 4214 hns3_init_mac_addr(netdev); 4215 4216 hns3_set_default_feature(netdev); 4217 4218 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; 4219 netdev->priv_flags |= IFF_UNICAST_FLT; 4220 netdev->netdev_ops = &hns3_nic_netdev_ops; 4221 SET_NETDEV_DEV(netdev, &pdev->dev); 4222 hns3_ethtool_set_ops(netdev); 4223 4224 /* Carrier off reporting is important to ethtool even BEFORE open */ 4225 netif_carrier_off(netdev); 4226 4227 ret = hns3_get_ring_config(priv); 4228 if (ret) { 4229 ret = -ENOMEM; 4230 goto out_get_ring_cfg; 4231 } 4232 4233 ret = hns3_nic_alloc_vector_data(priv); 4234 if (ret) { 4235 ret = -ENOMEM; 4236 goto out_alloc_vector_data; 4237 } 4238 4239 ret = hns3_nic_init_vector_data(priv); 4240 if (ret) { 4241 ret = -ENOMEM; 4242 goto out_init_vector_data; 4243 } 4244 4245 ret = hns3_init_all_ring(priv); 4246 if (ret) { 4247 ret = -ENOMEM; 4248 goto out_init_ring; 4249 } 4250 4251 ret = hns3_init_phy(netdev); 4252 if (ret) 4253 goto out_init_phy; 4254 4255 ret = register_netdev(netdev); 4256 if (ret) { 4257 dev_err(priv->dev, "probe register netdev fail!\n"); 4258 goto out_reg_netdev_fail; 4259 } 4260 4261 /* the device can work without cpu rmap, only aRFS needs it */ 4262 ret = hns3_set_rx_cpu_rmap(netdev); 4263 if (ret) 4264 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 4265 4266 ret = hns3_nic_init_irq(priv); 4267 if (ret) { 4268 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); 4269 hns3_free_rx_cpu_rmap(netdev); 4270 goto out_init_irq_fail; 4271 } 4272 4273 ret = hns3_client_start(handle); 4274 if (ret) { 4275 dev_err(priv->dev, "hns3_client_start fail! 
ret=%d\n", ret); 4276 goto out_client_start; 4277 } 4278 4279 hns3_dcbnl_setup(handle); 4280 4281 hns3_dbg_init(handle); 4282 4283 netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); 4284 4285 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) 4286 set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); 4287 4288 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 4289 4290 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 4291 set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); 4292 4293 if (netif_msg_drv(handle)) 4294 hns3_info_show(priv); 4295 4296 return ret; 4297 4298 out_client_start: 4299 hns3_free_rx_cpu_rmap(netdev); 4300 hns3_nic_uninit_irq(priv); 4301 out_init_irq_fail: 4302 unregister_netdev(netdev); 4303 out_reg_netdev_fail: 4304 hns3_uninit_phy(netdev); 4305 out_init_phy: 4306 hns3_uninit_all_ring(priv); 4307 out_init_ring: 4308 hns3_nic_uninit_vector_data(priv); 4309 out_init_vector_data: 4310 hns3_nic_dealloc_vector_data(priv); 4311 out_alloc_vector_data: 4312 priv->ring = NULL; 4313 out_get_ring_cfg: 4314 priv->ae_handle = NULL; 4315 free_netdev(netdev); 4316 return ret; 4317 } 4318 4319 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) 4320 { 4321 struct net_device *netdev = handle->kinfo.netdev; 4322 struct hns3_nic_priv *priv = netdev_priv(netdev); 4323 4324 if (netdev->reg_state != NETREG_UNINITIALIZED) 4325 unregister_netdev(netdev); 4326 4327 hns3_client_stop(handle); 4328 4329 hns3_uninit_phy(netdev); 4330 4331 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 4332 netdev_warn(netdev, "already uninitialized\n"); 4333 goto out_netdev_free; 4334 } 4335 4336 hns3_free_rx_cpu_rmap(netdev); 4337 4338 hns3_nic_uninit_irq(priv); 4339 4340 hns3_del_all_fd_rules(netdev, true); 4341 4342 hns3_clear_all_ring(handle, true); 4343 4344 hns3_nic_uninit_vector_data(priv); 4345 4346 hns3_nic_dealloc_vector_data(priv); 4347 4348 hns3_uninit_all_ring(priv); 4349 4350 hns3_put_ring_config(priv); 4351 4352 out_netdev_free: 4353 hns3_dbg_uninit(handle); 4354 free_netdev(netdev); 4355 } 4356 4357 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) 4358 { 4359 struct net_device *netdev = handle->kinfo.netdev; 4360 4361 if (!netdev) 4362 return; 4363 4364 if (linkup) { 4365 netif_tx_wake_all_queues(netdev); 4366 netif_carrier_on(netdev); 4367 if (netif_msg_link(handle)) 4368 netdev_info(netdev, "link up\n"); 4369 } else { 4370 netif_carrier_off(netdev); 4371 netif_tx_stop_all_queues(netdev); 4372 if (netif_msg_link(handle)) 4373 netdev_info(netdev, "link down\n"); 4374 } 4375 } 4376 4377 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) 4378 { 4379 while (ring->next_to_clean != ring->next_to_use) { 4380 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; 4381 hns3_free_buffer_detach(ring, ring->next_to_clean, 0); 4382 ring_ptr_move_fw(ring, next_to_clean); 4383 } 4384 4385 ring->pending_buf = 0; 4386 } 4387 4388 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) 4389 { 4390 struct hns3_desc_cb res_cbs; 4391 int ret; 4392 4393 while (ring->next_to_use != ring->next_to_clean) { 4394 /* When a buffer is not reused, it's memory has been 4395 * freed in hns3_handle_rx_bd or will be freed by 4396 * stack, so we need to replace the buffer here. 
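         * Contrast this with hns3_force_clear_rx_ring() below, which
         * only unmaps such buffers: it is reached via
         * hns3_clear_all_ring(h, true) from the uninit paths, where the
         * descriptors are never handed back to the hardware, so a
         * replacement buffer is presumably not needed there.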
4397          */
4398         if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4399             ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
4400             if (ret) {
4401                 u64_stats_update_begin(&ring->syncp);
4402                 ring->stats.sw_err_cnt++;
4403                 u64_stats_update_end(&ring->syncp);
4404                 /* if allocating a new buffer fails, exit directly;
4405                  * the ring will be cleared again in the up flow.
4406                  */
4407                 netdev_warn(ring_to_netdev(ring),
4408                         "reserve buffer map failed, ret = %d\n",
4409                         ret);
4410                 return ret;
4411             }
4412             hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
4413         }
4414         ring_ptr_move_fw(ring, next_to_use);
4415     }
4416
4417     /* Free the pending skb in rx ring */
4418     if (ring->skb) {
4419         dev_kfree_skb_any(ring->skb);
4420         ring->skb = NULL;
4421         ring->pending_buf = 0;
4422     }
4423
4424     return 0;
4425 }
4426
4427 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
4428 {
4429     while (ring->next_to_use != ring->next_to_clean) {
4430         /* When a buffer is not reused, its memory has been
4431          * freed in hns3_handle_rx_bd or will be freed by the
4432          * stack, so we only need to unmap the buffer here.
4433          */
4434         if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4435             hns3_unmap_buffer(ring,
4436                       &ring->desc_cb[ring->next_to_use]);
4437             ring->desc_cb[ring->next_to_use].dma = 0;
4438         }
4439
4440         ring_ptr_move_fw(ring, next_to_use);
4441     }
4442 }
4443
4444 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
4445 {
4446     struct net_device *ndev = h->kinfo.netdev;
4447     struct hns3_nic_priv *priv = netdev_priv(ndev);
4448     u32 i;
4449
4450     for (i = 0; i < h->kinfo.num_tqps; i++) {
4451         struct hns3_enet_ring *ring;
4452
4453         ring = &priv->ring[i];
4454         hns3_clear_tx_ring(ring);
4455
4456         ring = &priv->ring[i + h->kinfo.num_tqps];
4457         /* Continue to clear other rings even if clearing some
4458          * rings failed.
4459          */
4460         if (force)
4461             hns3_force_clear_rx_ring(ring);
4462         else
4463             hns3_clear_rx_ring(ring);
4464     }
4465 }
4466
4467 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
4468 {
4469     struct net_device *ndev = h->kinfo.netdev;
4470     struct hns3_nic_priv *priv = netdev_priv(ndev);
4471     struct hns3_enet_ring *rx_ring;
4472     int i, j;
4473     int ret;
4474
4475     for (i = 0; i < h->kinfo.num_tqps; i++) {
4476         ret = h->ae_algo->ops->reset_queue(h, i);
4477         if (ret)
4478             return ret;
4479
4480         hns3_init_ring_hw(&priv->ring[i]);
4481
4482         /* We need to clear the TX ring here because the self test
4483          * uses the ring and does not go through a down/up cycle.
4484          */
4485         hns3_clear_tx_ring(&priv->ring[i]);
4486         priv->ring[i].next_to_clean = 0;
4487         priv->ring[i].next_to_use = 0;
4488         priv->ring[i].last_to_use = 0;
4489
4490         rx_ring = &priv->ring[i + h->kinfo.num_tqps];
4491         hns3_init_ring_hw(rx_ring);
4492         ret = hns3_clear_rx_ring(rx_ring);
4493         if (ret)
4494             return ret;
4495
4496         /* We cannot know the hardware head and tail when this
4497          * function is called in the reset flow, so we reuse all
4498          * descriptors. */
4499         for (j = 0; j < rx_ring->desc_num; j++)
4500             hns3_reuse_buffer(rx_ring, j);
4501
4502         rx_ring->next_to_clean = 0;
4503         rx_ring->next_to_use = 0;
4504     }
4505
4506     hns3_init_tx_ring_tc(priv);
4507
4508     return 0;
4509 }
4510
4511 static void hns3_store_coal(struct hns3_nic_priv *priv)
4512 {
4513     /* ethtool only supports setting and querying one coalesce
4514      * configuration for now, so save vector 0's coalesce
4515      * configuration here in order to restore it.
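      * hns3_restore_coal() below copies these saved values back to
      * every vector once the vectors have been re-allocated in the
      * reset/init path (see hns3_reset_notify_init_enet()).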
4516 */ 4517 memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, 4518 sizeof(struct hns3_enet_coalesce)); 4519 memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal, 4520 sizeof(struct hns3_enet_coalesce)); 4521 } 4522 4523 static void hns3_restore_coal(struct hns3_nic_priv *priv) 4524 { 4525 u16 vector_num = priv->vector_num; 4526 int i; 4527 4528 for (i = 0; i < vector_num; i++) { 4529 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal, 4530 sizeof(struct hns3_enet_coalesce)); 4531 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal, 4532 sizeof(struct hns3_enet_coalesce)); 4533 } 4534 } 4535 4536 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) 4537 { 4538 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 4539 struct net_device *ndev = kinfo->netdev; 4540 struct hns3_nic_priv *priv = netdev_priv(ndev); 4541 4542 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) 4543 return 0; 4544 4545 if (!netif_running(ndev)) 4546 return 0; 4547 4548 return hns3_nic_net_stop(ndev); 4549 } 4550 4551 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) 4552 { 4553 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 4554 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); 4555 int ret = 0; 4556 4557 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 4558 4559 if (netif_running(kinfo->netdev)) { 4560 ret = hns3_nic_net_open(kinfo->netdev); 4561 if (ret) { 4562 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 4563 netdev_err(kinfo->netdev, 4564 "net up fail, ret=%d!\n", ret); 4565 return ret; 4566 } 4567 } 4568 4569 return ret; 4570 } 4571 4572 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) 4573 { 4574 struct net_device *netdev = handle->kinfo.netdev; 4575 struct hns3_nic_priv *priv = netdev_priv(netdev); 4576 int ret; 4577 4578 /* Carrier off reporting is important to ethtool even BEFORE open */ 4579 netif_carrier_off(netdev); 4580 4581 ret = hns3_get_ring_config(priv); 4582 if (ret) 4583 return ret; 4584 4585 ret = hns3_nic_alloc_vector_data(priv); 4586 if (ret) 4587 goto err_put_ring; 4588 4589 hns3_restore_coal(priv); 4590 4591 ret = hns3_nic_init_vector_data(priv); 4592 if (ret) 4593 goto err_dealloc_vector; 4594 4595 ret = hns3_init_all_ring(priv); 4596 if (ret) 4597 goto err_uninit_vector; 4598 4599 /* the device can work without cpu rmap, only aRFS needs it */ 4600 ret = hns3_set_rx_cpu_rmap(netdev); 4601 if (ret) 4602 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 4603 4604 ret = hns3_nic_init_irq(priv); 4605 if (ret) { 4606 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); 4607 hns3_free_rx_cpu_rmap(netdev); 4608 goto err_init_irq_fail; 4609 } 4610 4611 if (!hns3_is_phys_func(handle->pdev)) 4612 hns3_init_mac_addr(netdev); 4613 4614 ret = hns3_client_start(handle); 4615 if (ret) { 4616 dev_err(priv->dev, "hns3_client_start fail! 
ret=%d\n", ret); 4617 goto err_client_start_fail; 4618 } 4619 4620 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 4621 4622 return ret; 4623 4624 err_client_start_fail: 4625 hns3_free_rx_cpu_rmap(netdev); 4626 hns3_nic_uninit_irq(priv); 4627 err_init_irq_fail: 4628 hns3_uninit_all_ring(priv); 4629 err_uninit_vector: 4630 hns3_nic_uninit_vector_data(priv); 4631 err_dealloc_vector: 4632 hns3_nic_dealloc_vector_data(priv); 4633 err_put_ring: 4634 hns3_put_ring_config(priv); 4635 4636 return ret; 4637 } 4638 4639 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) 4640 { 4641 struct net_device *netdev = handle->kinfo.netdev; 4642 struct hns3_nic_priv *priv = netdev_priv(netdev); 4643 4644 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 4645 netdev_warn(netdev, "already uninitialized\n"); 4646 return 0; 4647 } 4648 4649 hns3_free_rx_cpu_rmap(netdev); 4650 hns3_nic_uninit_irq(priv); 4651 hns3_clear_all_ring(handle, true); 4652 hns3_reset_tx_queue(priv->ae_handle); 4653 4654 hns3_nic_uninit_vector_data(priv); 4655 4656 hns3_store_coal(priv); 4657 4658 hns3_nic_dealloc_vector_data(priv); 4659 4660 hns3_uninit_all_ring(priv); 4661 4662 hns3_put_ring_config(priv); 4663 4664 return 0; 4665 } 4666 4667 static int hns3_reset_notify(struct hnae3_handle *handle, 4668 enum hnae3_reset_notify_type type) 4669 { 4670 int ret = 0; 4671 4672 switch (type) { 4673 case HNAE3_UP_CLIENT: 4674 ret = hns3_reset_notify_up_enet(handle); 4675 break; 4676 case HNAE3_DOWN_CLIENT: 4677 ret = hns3_reset_notify_down_enet(handle); 4678 break; 4679 case HNAE3_INIT_CLIENT: 4680 ret = hns3_reset_notify_init_enet(handle); 4681 break; 4682 case HNAE3_UNINIT_CLIENT: 4683 ret = hns3_reset_notify_uninit_enet(handle); 4684 break; 4685 default: 4686 break; 4687 } 4688 4689 return ret; 4690 } 4691 4692 static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num, 4693 bool rxfh_configured) 4694 { 4695 int ret; 4696 4697 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, 4698 rxfh_configured); 4699 if (ret) { 4700 dev_err(&handle->pdev->dev, 4701 "Change tqp num(%u) fail.\n", new_tqp_num); 4702 return ret; 4703 } 4704 4705 ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT); 4706 if (ret) 4707 return ret; 4708 4709 ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT); 4710 if (ret) 4711 hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT); 4712 4713 return ret; 4714 } 4715 4716 int hns3_set_channels(struct net_device *netdev, 4717 struct ethtool_channels *ch) 4718 { 4719 struct hnae3_handle *h = hns3_get_handle(netdev); 4720 struct hnae3_knic_private_info *kinfo = &h->kinfo; 4721 bool rxfh_configured = netif_is_rxfh_configured(netdev); 4722 u32 new_tqp_num = ch->combined_count; 4723 u16 org_tqp_num; 4724 int ret; 4725 4726 if (hns3_nic_resetting(netdev)) 4727 return -EBUSY; 4728 4729 if (ch->rx_count || ch->tx_count) 4730 return -EINVAL; 4731 4732 if (kinfo->tc_info.mqprio_active) { 4733 dev_err(&netdev->dev, 4734 "it's not allowed to set channels via ethtool when MQPRIO mode is on\n"); 4735 return -EINVAL; 4736 } 4737 4738 if (new_tqp_num > hns3_get_max_available_channels(h) || 4739 new_tqp_num < 1) { 4740 dev_err(&netdev->dev, 4741 "Change tqps fail, the tqp range is from 1 to %u", 4742 hns3_get_max_available_channels(h)); 4743 return -EINVAL; 4744 } 4745 4746 if (kinfo->rss_size == new_tqp_num) 4747 return 0; 4748 4749 netif_dbg(h, drv, netdev, 4750 "set channels: tqp_num=%u, rxfh=%d\n", 4751 new_tqp_num, rxfh_configured); 4752 4753 ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); 4754 
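    /* If the DOWN notification succeeded, the code below UNINITs the
     * enet client, then hns3_change_channels() asks the AE driver to
     * switch to the new tqp number and re-issues the INIT and UP
     * notifications.  On failure it reverts to the original tqp number
     * so the device is left in a usable state.
     */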
if (ret) 4755 return ret; 4756 4757 ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); 4758 if (ret) 4759 return ret; 4760 4761 org_tqp_num = h->kinfo.num_tqps; 4762 ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); 4763 if (ret) { 4764 int ret1; 4765 4766 netdev_warn(netdev, 4767 "Change channels fail, revert to old value\n"); 4768 ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); 4769 if (ret1) { 4770 netdev_err(netdev, 4771 "revert to old channel fail\n"); 4772 return ret1; 4773 } 4774 4775 return ret; 4776 } 4777 4778 return 0; 4779 } 4780 4781 static const struct hns3_hw_error_info hns3_hw_err[] = { 4782 { .type = HNAE3_PPU_POISON_ERROR, 4783 .msg = "PPU poison" }, 4784 { .type = HNAE3_CMDQ_ECC_ERROR, 4785 .msg = "IMP CMDQ error" }, 4786 { .type = HNAE3_IMP_RD_POISON_ERROR, 4787 .msg = "IMP RD poison" }, 4788 { .type = HNAE3_ROCEE_AXI_RESP_ERROR, 4789 .msg = "ROCEE AXI RESP error" }, 4790 }; 4791 4792 static void hns3_process_hw_error(struct hnae3_handle *handle, 4793 enum hnae3_hw_error_type type) 4794 { 4795 int i; 4796 4797 for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { 4798 if (hns3_hw_err[i].type == type) { 4799 dev_err(&handle->pdev->dev, "Detected %s!\n", 4800 hns3_hw_err[i].msg); 4801 break; 4802 } 4803 } 4804 } 4805 4806 static const struct hnae3_client_ops client_ops = { 4807 .init_instance = hns3_client_init, 4808 .uninit_instance = hns3_client_uninit, 4809 .link_status_change = hns3_link_status_change, 4810 .reset_notify = hns3_reset_notify, 4811 .process_hw_error = hns3_process_hw_error, 4812 }; 4813 4814 /* hns3_init_module - Driver registration routine 4815 * hns3_init_module is the first routine called when the driver is 4816 * loaded. All it does is register with the PCI subsystem. 4817 */ 4818 static int __init hns3_init_module(void) 4819 { 4820 int ret; 4821 4822 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); 4823 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); 4824 4825 client.type = HNAE3_CLIENT_KNIC; 4826 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", 4827 hns3_driver_name); 4828 4829 client.ops = &client_ops; 4830 4831 INIT_LIST_HEAD(&client.node); 4832 4833 hns3_dbg_register_debugfs(hns3_driver_name); 4834 4835 ret = hnae3_register_client(&client); 4836 if (ret) 4837 goto err_reg_client; 4838 4839 ret = pci_register_driver(&hns3_driver); 4840 if (ret) 4841 goto err_reg_driver; 4842 4843 return ret; 4844 4845 err_reg_driver: 4846 hnae3_unregister_client(&client); 4847 err_reg_client: 4848 hns3_dbg_unregister_debugfs(); 4849 return ret; 4850 } 4851 module_init(hns3_init_module); 4852 4853 /* hns3_exit_module - Driver exit cleanup routine 4854 * hns3_exit_module is called just before the driver is removed 4855 * from memory. 4856 */ 4857 static void __exit hns3_exit_module(void) 4858 { 4859 pci_unregister_driver(&hns3_driver); 4860 hnae3_unregister_client(&client); 4861 hns3_dbg_unregister_debugfs(); 4862 } 4863 module_exit(hns3_exit_module); 4864 4865 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); 4866 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 4867 MODULE_LICENSE("GPL"); 4868 MODULE_ALIAS("pci:hns-nic"); 4869