// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gre.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>

#include "hnae3.h"
#include "hns3_enet.h"
/* All hns3 tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"

#define hns3_set_field(origin, shift, val)	((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S)	DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

#define hns3_rl_err(fmt, ...)						\
	do {								\
		if (net_ratelimit())					\
			netdev_err(fmt, ##__VA_ARGS__);			\
	} while (0)

static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);

static const char hns3_driver_name[] = "hns3";
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");

#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			   NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define HNS3_INNER_VLAN_TAG	1
#define HNS3_OUTER_VLAN_TAG	2

#define HNS3_MIN_TX_LEN		33U

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule_irqoff(&tqp_vector->napi);

	return IRQ_HANDLED;
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* clear the affinity mask */
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}
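/* Request one IRQ for every TQP vector that has rings attached. The
 * IRQ name encodes the driver name, the PCI device and whether the
 * vector serves Tx, Rx or both, and the affinity hint steers each
 * vector to its own CPU mask.
 */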
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "TxRx", txrx_int_idx++);
			txrx_int_idx++;
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Rx", rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Tx", tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			hns3_nic_uninit_irq(priv);
			return ret;
		}

		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);
	enable_irq(tqp_vector->vector_irq);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}
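/* The helpers below program the per-vector interrupt coalescing
 * registers: RL (rate limiter), GL (gap limiter, one register each for
 * Rx and Tx) and QL (quantity limiter, on hardware that supports it).
 */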
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second. GL and RL are two ways to achieve interrupt coalescing.
	 */

	if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
	    !tqp_vector->rx_group.coal.adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->rx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->tx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
}

void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
}

static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;

	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 * 3. QL (Interrupt Quantity Limiter)
	 *
	 * Default: enable interrupt coalescing self-adaptive and GL
	 */
	tx_coal->adapt_enable = 1;
	rx_coal->adapt_enable = 1;

	tx_coal->int_gl = HNS3_INT_GL_50K;
	rx_coal->int_gl = HNS3_INT_GL_50K;

	rx_coal->flow_level = HNS3_FLOW_LOW;
	tx_coal->flow_level = HNS3_FLOW_LOW;

	/* For device version V3 and above, GL can be configured in 1us
	 * unit, so use the 1us unit.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
		tx_coal->unit_1us = 1;
		rx_coal->unit_1us = 1;
	}

	if (ae_dev->dev_specs.int_ql_max) {
		tx_coal->ql_enable = 1;
		rx_coal->ql_enable = 1;
		tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
		rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
	}
}
static void
hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
			     struct hns3_nic_priv *priv)
{
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);

	if (tx_coal->ql_enable)
		hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);

	if (rx_coal->ql_enable)
		hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
}

static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	unsigned int queue_size = kinfo->num_tqps;
	int i, ret;

	if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, tc_info->num_tc);
		if (ret) {
			netdev_err(netdev,
				   "netdev_set_num_tc fail, ret=%d!\n", ret);
			return ret;
		}

		for (i = 0; i < HNAE3_MAX_TC; i++) {
			if (!test_bit(i, &tc_info->tc_en))
				continue;

			netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
					    tc_info->tqp_offset[i]);
		}
	}

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 alloc_tqps, max_rss_size, rss_size;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
	rss_size = alloc_tqps / h->kinfo.tc_info.num_tc;

	return min_t(u16, rss_size, max_rss_size);
}

static void hns3_tqp_enable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg |= BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_tqp_disable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg &= ~BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
#endif
}
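/* Build the aRFS CPU reverse map so received flows can be steered to
 * the CPU that is consuming them (only when CONFIG_RFS_ACCEL is set).
 */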
static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_enet_tqp_vector *tqp_vector;
	int i, ret;

	if (!netdev->rx_cpu_rmap) {
		netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
		if (!netdev->rx_cpu_rmap)
			return -ENOMEM;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				       tqp_vector->vector_irq);
		if (ret) {
			hns3_free_rx_cpu_rmap(netdev);
			return ret;
		}
	}
#endif
	return 0;
}

static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* enable rcb */
	for (j = 0; j < h->kinfo.num_tqps; j++)
		hns3_tqp_enable(h->kinfo.tqp[j]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret) {
		set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
		while (j--)
			hns3_tqp_disable(h->kinfo.tqp[j]);

		for (j = i - 1; j >= 0; j--)
			hns3_vector_disable(&priv->tqp_vector[j]);
	}

	return ret;
}

static void hns3_config_xps(struct hns3_nic_priv *priv)
{
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
		struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;

		while (ring) {
			int ret;

			ret = netif_set_xps_queue(priv->netdev,
						  &tqp_vector->affinity_mask,
						  ring->tqp->tqp_index);
			if (ret)
				netdev_warn(priv->netdev,
					    "set xps queue failed: %d", ret);

			ring = ring->next;
		}
	}
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo;
	int i, ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev, "net up fail, ret=%d!\n", ret);
		return ret;
	}

	kinfo = &h->kinfo;
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]);

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, true);

	hns3_config_xps(priv);

	netif_dbg(h, drv, netdev, "net open\n");

	return 0;
}

static void hns3_reset_tx_queue(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct netdev_queue *dev_queue;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring[i].queue_index);
		netdev_tx_reset_queue(dev_queue);
	}
}
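/* Tear down in the reverse order of hns3_nic_net_up(): disable the
 * vectors first, then the RCB rings, and finally stop the ae_dev.
 */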
static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* disable rcb */
	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_tqp_disable(h->kinfo.tqp[i]);

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
	 * during the reset process, because the driver may not be able
	 * to disable the ring through firmware when downing the netdev.
	 */
	if (!hns3_nic_resetting(netdev))
		hns3_clear_all_ring(priv->ae_handle, false);

	hns3_reset_tx_queue(priv->ae_handle);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return 0;

	netif_dbg(h, drv, netdev, "net stop\n");

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, false);

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	/* ignore the request to remove the device address, because the
	 * device address and the other addresses of the uc list are
	 * stored in the function's mac filter list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}
static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
	u8 flags = 0;

	if (netdev->flags & IFF_PROMISC) {
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
	} else {
		flags |= HNAE3_VLAN_FLTR;
		if (netdev->flags & IFF_ALLMULTI)
			flags |= HNAE3_USER_MPE;
	}

	return flags;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 new_flags;

	new_flags = hns3_get_netdev_flags(netdev);

	__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
	__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);

	/* In user promiscuous mode, promisc is enabled and vlan filtering
	 * is disabled to let all packets in.
	 */
	h->netdev_flags = new_flags;
	hns3_request_update_promisc_mode(h);
}

void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ops->request_update_promisc_mode)
		ops->request_update_promisc_mode(handle);
}

void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
	bool last_state;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
	    h->ae_algo->ops->enable_vlan_filter) {
		last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
		if (enable != last_state) {
			netdev_info(netdev,
				    "%s vlan filter\n",
				    enable ? "enable" : "disable");
			h->ae_algo->ops->enable_vlan_filter(h, enable);
		}
	}
}
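/* Prepare the TSO related TX descriptor fields: clear the IPv4 header
 * checksum, strip the payload length from the L4 pseudo checksum and
 * extract the payload length and MSS for the hardware.
 */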
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (unlikely(ret < 0))
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(l4_paylen));
	} else {
		hdr_len = (l4.tcp->doff << 2) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(l4_paylen));
	}

	/* find the txbd field values */
	*paylen_fdop_ol4cs = skb->len - hdr_len;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);

	/* offload outer UDP header checksum */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
		hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	trace_hns3_tso(skb);

	return 0;
}

static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union l3_hdr_info l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header pointer */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header pointer */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}
/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and
 * the packet is UDP with a destination port that is IANA assigned for a
 * tunnel, the hardware is expected to do the checksum offload, but it
 * will not do so when the UDP destination port is 4789 (VXLAN) or
 * 6081 (GENEVE).
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(skb->dev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	union l4_hdr_info l4;

	/* For device version V3 and above, the hardware can do this
	 * checksum offload.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		return false;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation &&
	      (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
	       l4.udp->dest == htons(GENEVE_UDP_PORT))))
		return false;

	skb_checksum_help(skb);

	return true;
}

static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
				  u32 *ol_type_vlan_len_msec)
{
	u32 l2_len, l3_len, l4_len;
	unsigned char *il2_hdr;
	union l3_hdr_info l3;
	union l4_hdr_info l4;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute OL2 header size, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* compute OL3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);

	il2_hdr = skb_inner_mac_header(skb);
	/* compute OL4 header size, defined in 4 Bytes */
	l4_len = il2_hdr - l4.hdr;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);

	/* define outer network header type */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (skb_is_gso(skb))
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_CSUM);
		else
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_NO_CSUM);

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
			       HNS3_OL3T_IPV6);
	}

	if (ol4_proto == IPPROTO_UDP)
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_MAC_IN_UDP);
	else if (ol4_proto == IPPROTO_GRE)
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_NVGRE);
}
static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
			   u8 il4_proto, u32 *type_cs_vlan_tso,
			   u32 *ol_type_vlan_len_msec)
{
	unsigned char *l2_hdr = skb->data;
	u32 l4_proto = ol4_proto;
	union l4_hdr_info l4;
	union l3_hdr_info l3;
	u32 l2_len, l3_len;

	l4.hdr = skb_transport_header(skb);
	l3.hdr = skb_network_header(skb);

	/* handle encapsulation skb */
	if (skb->encapsulation) {
		/* If this is not a UDP/GRE encapsulation skb */
		if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
			/* drop the tunnel packet if the hardware doesn't
			 * support it, because the hardware can't calculate
			 * the csum when doing TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already,
			 * the driver calculates the l4 checksum when not TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);

		/* switch to inner header */
		l2_hdr = skb_inner_mac_header(skb);
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
	} else if (l3.v6->version == 6) {
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV6);
	}

	/* compute inner(/normal) L2 header size, defined in 2 Bytes */
	l2_len = l3.hdr - l2_hdr;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       l4.tcp->doff);
		break;
	case IPPROTO_UDP:
		if (hns3_tunnel_csum_bug(skb))
			break;

		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       (sizeof(struct udphdr) >> 2));
		break;
	case IPPROTO_SCTP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       (sizeof(struct sctphdr) >> 2));
		break;
	default:
		/* drop the tunnel packet if the hardware doesn't support it,
		 * because the hardware can't calculate the csum when doing
		 * TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already,
		 * the driver calculates the l4 checksum when not TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}
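/* Decide how the VLAN tag should be handled for transmit: returns
 * HNS3_OUTER_VLAN_TAG or HNS3_INNER_VLAN_TAG when a HW accelerated tag
 * is present, 0 when the hardware does not need to insert a tag, or a
 * negative errno on failure.
 */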
static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
			     struct sk_buff *skb)
{
	struct hnae3_handle *handle = tx_ring->tqp->handle;
	struct hnae3_ae_dev *ae_dev;
	struct vlan_ethhdr *vhdr;
	int rc;

	if (!(skb->protocol == htons(ETH_P_8021Q) ||
	      skb_vlan_tag_present(skb)))
		return 0;

	/* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert
	 * VLAN is enabled, only one VLAN header is allowed in the skb,
	 * otherwise it will cause a RAS error.
	 */
	ae_dev = pci_get_drvdata(handle->pdev);
	if (unlikely(skb_vlan_tagged_multi(skb) &&
		     ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		     handle->port_base_vlan_state ==
		     HNAE3_PORT_BASE_VLAN_ENABLE))
		return -EINVAL;

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1q, the driver just needs to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	if (skb_vlan_tag_present(skb)) {
		/* Based on hw strategy, use out_vtag in two layer tag case,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) &&
		    handle->port_base_vlan_state ==
		    HNAE3_PORT_BASE_VLAN_DISABLE)
			rc = HNS3_OUTER_VLAN_TAG;
		else
			rc = HNS3_INNER_VLAN_TAG;

		skb->protocol = vlan_get_protocol(skb);
		return rc;
	}

	rc = skb_cow_head(skb, 0);
	if (unlikely(rc < 0))
		return rc;

	vhdr = (struct vlan_ethhdr *)skb->data;
	vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
					 & VLAN_PRIO_MASK);

	skb->protocol = vlan_get_protocol(skb);
	return 0;
}

/* check if the hardware is capable of checksum offloading */
static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(skb->dev);

	/* Kindly note, due to backward compatibility of the TX descriptor,
	 * HW checksum of the non-IP packets and GSO packets is handled at
	 * different places in the following code.
	 */
	if (skb->csum_not_inet || skb_is_gso(skb) ||
	    !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
		return false;

	return true;
}

static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
			      struct sk_buff *skb, struct hns3_desc *desc)
{
	u32 ol_type_vlan_len_msec = 0;
	u32 paylen_ol4cs = skb->len;
	u32 type_cs_vlan_tso = 0;
	u16 mss_hw_csum = 0;
	u16 inner_vtag = 0;
	u16 out_vtag = 0;
	int ret;

	ret = hns3_handle_vtags(ring, skb);
	if (unlikely(ret < 0)) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_vlan_err++;
		u64_stats_update_end(&ring->syncp);
		return ret;
	} else if (ret == HNS3_INNER_VLAN_TAG) {
		inner_vtag = skb_vlan_tag_get(skb);
		inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
				VLAN_PRIO_MASK;
		hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
	} else if (ret == HNS3_OUTER_VLAN_TAG) {
		out_vtag = skb_vlan_tag_get(skb);
		out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
				VLAN_PRIO_MASK;
		hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
			       1);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 ol4_proto, il4_proto;

		if (hns3_check_hw_tx_csum(skb)) {
			/* set checksum start and offset, defined in 2 Bytes */
			hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
				       skb_checksum_start_offset(skb) >> 1);
			hns3_set_field(ol_type_vlan_len_msec,
				       HNS3_TXD_CSUM_OFFSET_S,
				       skb->csum_offset >> 1);
			mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
			goto out_hw_tx_csum;
		}

		skb_reset_mac_len(skb);

		ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
		if (unlikely(ret < 0)) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.tx_l4_proto_err++;
			u64_stats_update_end(&ring->syncp);
			return ret;
		}

		ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
				      &type_cs_vlan_tso,
				      &ol_type_vlan_len_msec);
		if (unlikely(ret < 0)) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.tx_l2l3l4_err++;
			u64_stats_update_end(&ring->syncp);
			return ret;
		}

		ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
				   &type_cs_vlan_tso);
		if (unlikely(ret < 0)) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.tx_tso_err++;
			u64_stats_update_end(&ring->syncp);
			return ret;
		}
	}

out_hw_tx_csum:
	/* Set txbd */
	desc->tx.ol_type_vlan_len_msec =
		cpu_to_le32(ol_type_vlan_len_msec);
	desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
	desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
	desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
	desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
	desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);

	return 0;
}
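/* Map one buffer (linear skb data or a page fragment) for DMA and fill
 * one or more TX BDs for it; returns the number of BDs used, or a
 * negative errno if the DMA mapping fails.
 */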
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
			  unsigned int size, enum hns_desc_type type)
{
#define HNS3_LIKELY_BD_NUM	1

	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	struct device *dev = ring_to_dev(ring);
	skb_frag_t *frag;
	unsigned int frag_buf_num;
	int k, sizeoflast;
	dma_addr_t dma;

	if (type == DESC_TYPE_FRAGLIST_SKB ||
	    type == DESC_TYPE_SKB) {
		struct sk_buff *skb = (struct sk_buff *)priv;

		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	} else {
		frag = (skb_frag_t *)priv;
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
	}

	if (unlikely(dma_mapping_error(dev, dma))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		return -ENOMEM;
	}

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	if (likely(size <= HNS3_MAX_BD_SIZE)) {
		desc->addr = cpu_to_le64(dma);
		desc->tx.send_size = cpu_to_le16(size);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
			cpu_to_le16(BIT(HNS3_TXD_VLD_B));

		trace_hns3_tx_desc(ring, ring->next_to_use);
		ring_ptr_move_fw(ring, next_to_use);
		return HNS3_LIKELY_BD_NUM;
	}

	frag_buf_num = hns3_tx_bd_count(size);
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When frag size is bigger than hardware limit, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		/* now, fill the descriptor */
		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
				     (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
			cpu_to_le16(BIT(HNS3_TXD_VLD_B));

		trace_hns3_tx_desc(ring, ring->next_to_use);
		/* move ring pointer to next */
		ring_ptr_move_fw(ring, next_to_use);

		desc = &ring->desc[ring->next_to_use];
	}

	return frag_buf_num;
}
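/* Count how many TX BDs the linear part and the page fragments of an
 * skb will consume, splitting any buffer larger than HNS3_MAX_BD_SIZE
 * and bailing out early once HNS3_MAX_TSO_BD_NUM is exceeded.
 */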
static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
				    unsigned int bd_num)
{
	unsigned int size;
	int i;

	size = skb_headlen(skb);
	while (size > HNS3_MAX_BD_SIZE) {
		bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
		size -= HNS3_MAX_BD_SIZE;

		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	if (size) {
		bd_size[bd_num++] = size;
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		size = skb_frag_size(frag);
		if (!size)
			continue;

		while (size > HNS3_MAX_BD_SIZE) {
			bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
			size -= HNS3_MAX_BD_SIZE;

			if (bd_num > HNS3_MAX_TSO_BD_NUM)
				return bd_num;
		}

		bd_size[bd_num++] = size;
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	return bd_num;
}

static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
				   u8 max_non_tso_bd_num)
{
	struct sk_buff *frag_skb;
	unsigned int bd_num = 0;

	/* If the total len is within the max bd limit */
	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
		   skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
		return skb_shinfo(skb)->nr_frags + 1U;

	/* The below case will always be linearized, return
	 * HNS3_MAX_TSO_BD_NUM + 1U to make sure it is linearized.
	 */
	if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
		     (!skb_is_gso(skb) && skb->len >
		      HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
		return HNS3_MAX_TSO_BD_NUM + 1U;

	bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);

	if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
		return bd_num;

	skb_walk_frags(skb, frag_skb) {
		bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	return bd_num;
}
static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
{
	if (!skb->encapsulation)
		return skb_transport_offset(skb) + tcp_hdrlen(skb);

	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}

/* HW requires every continuous group of max_non_tso_bd_num buffers to hold
 * more data than the MSS. We simplify this by ensuring that skb_headlen plus
 * the first continuous max_non_tso_bd_num - 1 frags are larger than the gso
 * header len + mss, and that every following continuous group of
 * max_non_tso_bd_num - 1 frags is larger than the MSS, except the last
 * max_non_tso_bd_num - 1 frags.
 */
static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
				     unsigned int bd_num, u8 max_non_tso_bd_num)
{
	unsigned int tot_len = 0;
	int i;

	for (i = 0; i < max_non_tso_bd_num - 1U; i++)
		tot_len += bd_size[i];

	/* ensure the first max_non_tso_bd_num frags are greater than
	 * mss + header
	 */
	if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
	    skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
		return true;

	/* ensure every continuous max_non_tso_bd_num - 1 buffer is greater
	 * than mss except the last one.
	 */
	for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
		tot_len -= bd_size[i];
		tot_len += bd_size[i + max_non_tso_bd_num - 1U];

		if (tot_len < skb_shinfo(skb)->gso_size)
			return true;
	}

	return false;
}

void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
	int i;

	for (i = 0; i < MAX_SKB_FRAGS; i++)
		size[i] = skb_frag_size(&shinfo->frags[i]);
}
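/* Check whether the skb fits the hardware BD limits (linearizing it if
 * necessary) and whether the ring has enough free BDs; stop the
 * subqueue and return -EBUSY when it does not.
 */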
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
				  struct net_device *netdev,
				  struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
	unsigned int bd_num;

	bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
	if (unlikely(bd_num > max_non_tso_bd_num)) {
		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
		    !hns3_skb_need_linearized(skb, bd_size, bd_num,
					      max_non_tso_bd_num)) {
			trace_hns3_over_max_bd(skb);
			goto out;
		}

		if (__skb_linearize(skb))
			return -ENOMEM;

		bd_num = hns3_tx_bd_count(skb->len);
		if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
		    (!skb_is_gso(skb) &&
		     bd_num > max_non_tso_bd_num)) {
			trace_hns3_over_max_bd(skb);
			return -ENOMEM;
		}

		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_copy++;
		u64_stats_update_end(&ring->syncp);
	}

out:
	if (likely(ring_space(ring) >= bd_num))
		return bd_num;

	netif_stop_subqueue(netdev, ring->queue_index);
	smp_mb(); /* Memory barrier before checking ring_space */

	/* Start queue in case hns3_clean_tx_ring has just made room
	 * available and has not seen the queue stopped state performed
	 * by netif_stop_subqueue above.
	 */
	if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
	    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
		netif_start_subqueue(netdev, ring->queue_index);
		return bd_num;
	}

	return -EBUSY;
}

static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		struct hns3_desc *desc = &ring->desc[ring->next_to_use];

		memset(desc, 0, sizeof(*desc));

		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* rollback one */
		ring_ptr_move_bw(ring, next_to_use);

		if (!ring->desc_cb[ring->next_to_use].dma)
			continue;

		/* unmap the descriptor dma address */
		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
		    ring->desc_cb[ring->next_to_use].type ==
		    DESC_TYPE_FRAGLIST_SKB)
			dma_unmap_single(dev,
					 ring->desc_cb[ring->next_to_use].dma,
					 ring->desc_cb[ring->next_to_use].length,
					 DMA_TO_DEVICE);
		else if (ring->desc_cb[ring->next_to_use].length)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);

		ring->desc_cb[ring->next_to_use].length = 0;
		ring->desc_cb[ring->next_to_use].dma = 0;
		ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN;
	}
}

static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
				 struct sk_buff *skb, enum hns_desc_type type)
{
	unsigned int size = skb_headlen(skb);
	int i, ret, bd_num = 0;

	if (size) {
		ret = hns3_fill_desc(ring, skb, size, type);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		size = skb_frag_size(frag);
		if (!size)
			continue;

		ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	return bd_num;
}

static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
			     bool doorbell)
{
	ring->pending_buf += num;

	if (!doorbell) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_more++;
		u64_stats_update_end(&ring->syncp);
		return;
	}

	if (!ring->pending_buf)
		return;

	writel(ring->pending_buf,
	       ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
	ring->pending_buf = 0;
	WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
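/* Main transmit entry point: pad short frames, reserve ring space, fill
 * the descriptors for the skb and its frag list, then ring the doorbell
 * (possibly deferred when more packets are queued via xmit_more).
 */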
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
	struct netdev_queue *dev_queue;
	int pre_ntu, next_to_use_head;
	struct sk_buff *frag_skb;
	int bd_num = 0;
	bool doorbell;
	int ret;

	/* Hardware can only handle frames longer than 32 bytes */
	if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
		hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
		return NETDEV_TX_OK;
	}

	/* Prefetch the data used later */
	prefetch(skb->data);

	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
	if (unlikely(ret <= 0)) {
		if (ret == -EBUSY) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.tx_busy++;
			u64_stats_update_end(&ring->syncp);
			hns3_tx_doorbell(ring, 0, true);
			return NETDEV_TX_BUSY;
		} else if (ret == -ENOMEM) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.sw_err_cnt++;
			u64_stats_update_end(&ring->syncp);
		}

		hns3_rl_err(netdev, "xmit error: %d!\n", ret);
		goto out_err_tx_ok;
	}

	next_to_use_head = ring->next_to_use;

	ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
	if (unlikely(ret < 0))
		goto fill_err;

	ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
	if (unlikely(ret < 0))
		goto fill_err;

	bd_num += ret;

	skb_walk_frags(skb, frag_skb) {
		ret = hns3_fill_skb_to_desc(ring, frag_skb,
					    DESC_TYPE_FRAGLIST_SKB);
		if (unlikely(ret < 0))
			goto fill_err;

		bd_num += ret;
	}

	pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
					(ring->desc_num - 1);
	ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
				cpu_to_le16(BIT(HNS3_TXD_FE_B));
	trace_hns3_tx_desc(ring, pre_ntu);

	/* All BDs for this packet are filled; account it and ring the
	 * doorbell.
	 */
	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
	doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
					  netdev_xmit_more());
	hns3_tx_doorbell(ring, bd_num, doorbell);

	return NETDEV_TX_OK;

fill_err:
	hns3_clear_desc(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
	return NETDEV_TX_OK;
}
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    mac_addr->sa_data);
		return 0;
	}

	/* For VF device, if there is a perm_addr, then the user will not
	 * be allowed to change the address.
	 */
	if (!hns3_is_phys_func(h->pdev) &&
	    !is_zero_ether_addr(netdev->perm_addr)) {
		netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n",
			   netdev->perm_addr, mac_addr->sa_data);
		return -EPERM;
	}

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}

static int hns3_nic_do_ioctl(struct net_device *netdev,
			     struct ifreq *ifr, int cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!h->ae_algo->ops->do_ioctl)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
}

static int hns3_nic_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	bool enable;
	int ret;

	if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
		enable = !!(features & NETIF_F_GRO_HW);
		ret = h->ae_algo->ops->set_gro_en(h, enable);
		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
		enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
		ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
		enable = !!(features & NETIF_F_NTUPLE);
		h->ae_algo->ops->enable_fd(h, enable);
	}

	if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
	    h->ae_algo->ops->cls_flower_active(h)) {
		netdev_err(netdev,
			   "there are offloaded TC filters active, cannot disable HW TC offload");
		return -EINVAL;
	}

	netdev->features = features;
	return 0;
}
static netdev_features_t hns3_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
#define HNS3_MAX_HDR_LEN	480U
#define HNS3_MAX_L4_HDR_LEN	60U

	size_t len;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	if (skb->encapsulation)
		len = skb_inner_transport_header(skb) - skb->data;
	else
		len = skb_transport_header(skb) - skb->data;

	/* Assume L4 is 60 bytes, as TCP is the only protocol with a
	 * flexible value, and its max len is 60 bytes.
	 */
	len += HNS3_MAX_L4_HDR_LEN;

	/* Hardware only supports checksum on the skb with a max header
	 * len of 480 bytes.
	 */
	if (len > HNS3_MAX_HDR_LEN)
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

static void hns3_nic_get_stats64(struct net_device *netdev,
				 struct rtnl_link_stats64 *stats)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hnae3_handle *handle = priv->ae_handle;
	struct hns3_enet_ring *ring;
	u64 rx_length_errors = 0;
	u64 rx_crc_errors = 0;
	u64 rx_multicast = 0;
	unsigned int start;
	u64 tx_errors = 0;
	u64 rx_errors = 0;
	unsigned int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	u64 tx_drop = 0;
	u64 rx_drop = 0;

	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	handle->ae_algo->ops->update_stats(handle, &netdev->stats);

	for (idx = 0; idx < queue_num; idx++) {
		/* fetch the tx stats */
		ring = &priv->ring[idx];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			tx_bytes += ring->stats.tx_bytes;
			tx_pkts += ring->stats.tx_pkts;
			tx_drop += ring->stats.sw_err_cnt;
			tx_drop += ring->stats.tx_vlan_err;
			tx_drop += ring->stats.tx_l4_proto_err;
			tx_drop += ring->stats.tx_l2l3l4_err;
			tx_drop += ring->stats.tx_tso_err;
			tx_errors += ring->stats.sw_err_cnt;
			tx_errors += ring->stats.tx_vlan_err;
			tx_errors += ring->stats.tx_l4_proto_err;
			tx_errors += ring->stats.tx_l2l3l4_err;
			tx_errors += ring->stats.tx_tso_err;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		/* fetch the rx stats */
		ring = &priv->ring[idx + queue_num];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			rx_bytes += ring->stats.rx_bytes;
			rx_pkts += ring->stats.rx_pkts;
			rx_drop += ring->stats.l2_err;
			rx_errors += ring->stats.l2_err;
			rx_errors += ring->stats.l3l4_csum_err;
			rx_crc_errors += ring->stats.l2_err;
			rx_multicast += ring->stats.rx_multicast;
			rx_length_errors += ring->stats.err_pkt_len;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = rx_errors;
	stats->multicast = rx_multicast;
	stats->rx_length_errors = rx_length_errors;
	stats->rx_crc_errors = rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	stats->tx_errors = tx_errors;
	stats->rx_dropped = rx_drop;
	stats->tx_dropped = tx_drop;
	stats->collisions = netdev->stats.collisions;
	stats->rx_over_errors = netdev->stats.rx_over_errors;
	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = netdev->stats.tx_window_errors;
	stats->rx_compressed = netdev->stats.rx_compressed;
	stats->tx_compressed = netdev->stats.tx_compressed;
}
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct hnae3_knic_private_info *kinfo;
	u8 tc = mqprio_qopt->qopt.num_tc;
	u16 mode = mqprio_qopt->mode;
	u8 hw = mqprio_qopt->qopt.hw;
	struct hnae3_handle *h;

	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
		return -EOPNOTSUPP;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!netdev)
		return -EINVAL;

	h = hns3_get_handle(netdev);
	kinfo = &h->kinfo;

	netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);

	return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
		kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP;
}

static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv,
				    struct flow_cls_offload *flow)
{
	int tc = tc_classid_to_hwtc(priv->netdev, flow->classid);
	struct hnae3_handle *h = hns3_get_handle(priv->netdev);

	switch (flow->command) {
	case FLOW_CLS_REPLACE:
		if (h->ae_algo->ops->add_cls_flower)
			return h->ae_algo->ops->add_cls_flower(h, flow, tc);
		break;
	case FLOW_CLS_DESTROY:
		if (h->ae_algo->ops->del_cls_flower)
			return h->ae_algo->ops->del_cls_flower(h, flow);
		break;
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct hns3_nic_priv *priv = cb_priv;

	if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return hns3_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(hns3_block_cb_list);

static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct hns3_nic_priv *priv = netdev_priv(dev);
	int ret;

	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		ret = hns3_setup_tc(dev, type_data);
		break;
	case TC_SETUP_BLOCK:
		ret = flow_block_cb_setup_simple(type_data,
						 &hns3_block_cb_list,
						 hns3_setup_tc_block_cb,
						 priv, priv, true);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int hns3_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);

	return ret;
}

static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);

	return ret;
}

static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
				u8 qos, __be16 vlan_proto)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	netif_dbg(h, drv, netdev,
		  "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
		  vf, vlan, qos, ntohs(vlan_proto));

	if (h->ae_algo->ops->set_vf_vlan_filter)
		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
							  qos, vlan_proto);

	return ret;
}
1935 1936 if (hns3_nic_resetting(netdev)) 1937 return -EBUSY; 1938 1939 if (!handle->ae_algo->ops->set_vf_spoofchk) 1940 return -EOPNOTSUPP; 1941 1942 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); 1943 } 1944 1945 static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) 1946 { 1947 struct hnae3_handle *handle = hns3_get_handle(netdev); 1948 1949 if (!handle->ae_algo->ops->set_vf_trust) 1950 return -EOPNOTSUPP; 1951 1952 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); 1953 } 1954 1955 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) 1956 { 1957 struct hnae3_handle *h = hns3_get_handle(netdev); 1958 int ret; 1959 1960 if (hns3_nic_resetting(netdev)) 1961 return -EBUSY; 1962 1963 if (!h->ae_algo->ops->set_mtu) 1964 return -EOPNOTSUPP; 1965 1966 netif_dbg(h, drv, netdev, 1967 "change mtu from %u to %d\n", netdev->mtu, new_mtu); 1968 1969 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 1970 if (ret) 1971 netdev_err(netdev, "failed to change MTU in hardware %d\n", 1972 ret); 1973 else 1974 netdev->mtu = new_mtu; 1975 1976 return ret; 1977 } 1978 1979 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) 1980 { 1981 struct hns3_nic_priv *priv = netdev_priv(ndev); 1982 struct hnae3_handle *h = hns3_get_handle(ndev); 1983 struct hns3_enet_ring *tx_ring; 1984 struct napi_struct *napi; 1985 int timeout_queue = 0; 1986 int hw_head, hw_tail; 1987 int fbd_num, fbd_oft; 1988 int ebd_num, ebd_oft; 1989 int bd_num, bd_err; 1990 int ring_en, tc; 1991 int i; 1992 1993 /* Find the stopped queue the same way the stack does */ 1994 for (i = 0; i < ndev->num_tx_queues; i++) { 1995 struct netdev_queue *q; 1996 unsigned long trans_start; 1997 1998 q = netdev_get_tx_queue(ndev, i); 1999 trans_start = q->trans_start; 2000 if (netif_xmit_stopped(q) && 2001 time_after(jiffies, 2002 (trans_start + ndev->watchdog_timeo))) { 2003 timeout_queue = i; 2004 netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", 2005 q->state, 2006 jiffies_to_msecs(jiffies - trans_start)); 2007 break; 2008 } 2009 } 2010 2011 if (i == ndev->num_tx_queues) { 2012 netdev_info(ndev, 2013 "no netdev TX timeout queue found, timeout count: %llu\n", 2014 priv->tx_timeout_count); 2015 return false; 2016 } 2017 2018 priv->tx_timeout_count++; 2019 2020 tx_ring = &priv->ring[timeout_queue]; 2021 napi = &tx_ring->tqp_vector->napi; 2022 2023 netdev_info(ndev, 2024 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", 2025 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, 2026 tx_ring->next_to_clean, napi->state); 2027 2028 netdev_info(ndev, 2029 "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n", 2030 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, 2031 tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); 2032 2033 netdev_info(ndev, 2034 "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", 2035 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, 2036 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); 2037 2038 /* When mac received many pause frames continuous, it's unable to send 2039 * packets, which may cause tx timeout 2040 */ 2041 if (h->ae_algo->ops->get_mac_stats) { 2042 struct hns3_mac_stats mac_stats; 2043 2044 h->ae_algo->ops->get_mac_stats(h, &mac_stats); 2045 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", 2046 mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); 2047 } 2048 2049 hw_head = readl_relaxed(tx_ring->tqp->io_base + 2050 HNS3_RING_TX_RING_HEAD_REG); 2051 
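/* the remaining TX ring state registers are sampled the same way and
 * included in the diagnostic dump below
 */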
hw_tail = readl_relaxed(tx_ring->tqp->io_base + 2052 HNS3_RING_TX_RING_TAIL_REG); 2053 fbd_num = readl_relaxed(tx_ring->tqp->io_base + 2054 HNS3_RING_TX_RING_FBDNUM_REG); 2055 fbd_oft = readl_relaxed(tx_ring->tqp->io_base + 2056 HNS3_RING_TX_RING_OFFSET_REG); 2057 ebd_num = readl_relaxed(tx_ring->tqp->io_base + 2058 HNS3_RING_TX_RING_EBDNUM_REG); 2059 ebd_oft = readl_relaxed(tx_ring->tqp->io_base + 2060 HNS3_RING_TX_RING_EBD_OFFSET_REG); 2061 bd_num = readl_relaxed(tx_ring->tqp->io_base + 2062 HNS3_RING_TX_RING_BD_NUM_REG); 2063 bd_err = readl_relaxed(tx_ring->tqp->io_base + 2064 HNS3_RING_TX_RING_BD_ERR_REG); 2065 ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); 2066 tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); 2067 2068 netdev_info(ndev, 2069 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", 2070 bd_num, hw_head, hw_tail, bd_err, 2071 readl(tx_ring->tqp_vector->mask_addr)); 2072 netdev_info(ndev, 2073 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", 2074 ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); 2075 2076 return true; 2077 } 2078 2079 static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue) 2080 { 2081 struct hns3_nic_priv *priv = netdev_priv(ndev); 2082 struct hnae3_handle *h = priv->ae_handle; 2083 2084 if (!hns3_get_tx_timeo_queue_info(ndev)) 2085 return; 2086 2087 /* request the reset, and let the hclge to determine 2088 * which reset level should be done 2089 */ 2090 if (h->ae_algo->ops->reset_event) 2091 h->ae_algo->ops->reset_event(h->pdev, h); 2092 } 2093 2094 #ifdef CONFIG_RFS_ACCEL 2095 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 2096 u16 rxq_index, u32 flow_id) 2097 { 2098 struct hnae3_handle *h = hns3_get_handle(dev); 2099 struct flow_keys fkeys; 2100 2101 if (!h->ae_algo->ops->add_arfs_entry) 2102 return -EOPNOTSUPP; 2103 2104 if (skb->encapsulation) 2105 return -EPROTONOSUPPORT; 2106 2107 if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) 2108 return -EPROTONOSUPPORT; 2109 2110 if ((fkeys.basic.n_proto != htons(ETH_P_IP) && 2111 fkeys.basic.n_proto != htons(ETH_P_IPV6)) || 2112 (fkeys.basic.ip_proto != IPPROTO_TCP && 2113 fkeys.basic.ip_proto != IPPROTO_UDP)) 2114 return -EPROTONOSUPPORT; 2115 2116 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); 2117 } 2118 #endif 2119 2120 static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, 2121 struct ifla_vf_info *ivf) 2122 { 2123 struct hnae3_handle *h = hns3_get_handle(ndev); 2124 2125 if (!h->ae_algo->ops->get_vf_config) 2126 return -EOPNOTSUPP; 2127 2128 return h->ae_algo->ops->get_vf_config(h, vf, ivf); 2129 } 2130 2131 static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, 2132 int link_state) 2133 { 2134 struct hnae3_handle *h = hns3_get_handle(ndev); 2135 2136 if (!h->ae_algo->ops->set_vf_link_state) 2137 return -EOPNOTSUPP; 2138 2139 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); 2140 } 2141 2142 static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, 2143 int min_tx_rate, int max_tx_rate) 2144 { 2145 struct hnae3_handle *h = hns3_get_handle(ndev); 2146 2147 if (!h->ae_algo->ops->set_vf_rate) 2148 return -EOPNOTSUPP; 2149 2150 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, 2151 false); 2152 } 2153 2154 static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 2155 { 2156 struct hnae3_handle *h = hns3_get_handle(netdev); 2157 2158 if 
(!h->ae_algo->ops->set_vf_mac) 2159 return -EOPNOTSUPP; 2160 2161 if (is_multicast_ether_addr(mac)) { 2162 netdev_err(netdev, 2163 "Invalid MAC:%pM specified. Could not set MAC\n", 2164 mac); 2165 return -EINVAL; 2166 } 2167 2168 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); 2169 } 2170 2171 static const struct net_device_ops hns3_nic_netdev_ops = { 2172 .ndo_open = hns3_nic_net_open, 2173 .ndo_stop = hns3_nic_net_stop, 2174 .ndo_start_xmit = hns3_nic_net_xmit, 2175 .ndo_tx_timeout = hns3_nic_net_timeout, 2176 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 2177 .ndo_do_ioctl = hns3_nic_do_ioctl, 2178 .ndo_change_mtu = hns3_nic_change_mtu, 2179 .ndo_set_features = hns3_nic_set_features, 2180 .ndo_features_check = hns3_features_check, 2181 .ndo_get_stats64 = hns3_nic_get_stats64, 2182 .ndo_setup_tc = hns3_nic_setup_tc, 2183 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 2184 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 2185 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 2186 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 2187 .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, 2188 .ndo_set_vf_trust = hns3_set_vf_trust, 2189 #ifdef CONFIG_RFS_ACCEL 2190 .ndo_rx_flow_steer = hns3_rx_flow_steer, 2191 #endif 2192 .ndo_get_vf_config = hns3_nic_get_vf_config, 2193 .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, 2194 .ndo_set_vf_rate = hns3_nic_set_vf_rate, 2195 .ndo_set_vf_mac = hns3_nic_set_vf_mac, 2196 }; 2197 2198 bool hns3_is_phys_func(struct pci_dev *pdev) 2199 { 2200 u32 dev_id = pdev->device; 2201 2202 switch (dev_id) { 2203 case HNAE3_DEV_ID_GE: 2204 case HNAE3_DEV_ID_25GE: 2205 case HNAE3_DEV_ID_25GE_RDMA: 2206 case HNAE3_DEV_ID_25GE_RDMA_MACSEC: 2207 case HNAE3_DEV_ID_50GE_RDMA: 2208 case HNAE3_DEV_ID_50GE_RDMA_MACSEC: 2209 case HNAE3_DEV_ID_100G_RDMA_MACSEC: 2210 case HNAE3_DEV_ID_200G_RDMA: 2211 return true; 2212 case HNAE3_DEV_ID_VF: 2213 case HNAE3_DEV_ID_RDMA_DCB_PFC_VF: 2214 return false; 2215 default: 2216 dev_warn(&pdev->dev, "un-recognized pci device-id %u", 2217 dev_id); 2218 } 2219 2220 return false; 2221 } 2222 2223 static void hns3_disable_sriov(struct pci_dev *pdev) 2224 { 2225 /* If our VFs are assigned we cannot shut down SR-IOV 2226 * without causing issues, so just leave the hardware 2227 * available but disabled 2228 */ 2229 if (pci_vfs_assigned(pdev)) { 2230 dev_warn(&pdev->dev, 2231 "disabling driver while VFs are assigned\n"); 2232 return; 2233 } 2234 2235 pci_disable_sriov(pdev); 2236 } 2237 2238 /* hns3_probe - Device initialization routine 2239 * @pdev: PCI device information struct 2240 * @ent: entry in hns3_pci_tbl 2241 * 2242 * hns3_probe initializes a PF identified by a pci_dev structure. 2243 * The OS initialization, configuring of the PF private structure, 2244 * and a hardware reset occur. 
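 * Most of the initialization is delegated to the hnae3 layer via
 * hnae3_register_ae_dev() below.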
2245 * 2246 * Returns 0 on success, negative on failure 2247 */ 2248 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2249 { 2250 struct hnae3_ae_dev *ae_dev; 2251 int ret; 2252 2253 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); 2254 if (!ae_dev) 2255 return -ENOMEM; 2256 2257 ae_dev->pdev = pdev; 2258 ae_dev->flag = ent->driver_data; 2259 pci_set_drvdata(pdev, ae_dev); 2260 2261 ret = hnae3_register_ae_dev(ae_dev); 2262 if (ret) 2263 pci_set_drvdata(pdev, NULL); 2264 2265 return ret; 2266 } 2267 2268 /* hns3_remove - Device removal routine 2269 * @pdev: PCI device information struct 2270 */ 2271 static void hns3_remove(struct pci_dev *pdev) 2272 { 2273 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2274 2275 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) 2276 hns3_disable_sriov(pdev); 2277 2278 hnae3_unregister_ae_dev(ae_dev); 2279 pci_set_drvdata(pdev, NULL); 2280 } 2281 2282 /** 2283 * hns3_pci_sriov_configure 2284 * @pdev: pointer to a pci_dev structure 2285 * @num_vfs: number of VFs to allocate 2286 * 2287 * Enable or change the number of VFs. Called when the user updates the number 2288 * of VFs in sysfs. 2289 **/ 2290 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 2291 { 2292 int ret; 2293 2294 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { 2295 dev_warn(&pdev->dev, "Can not config SRIOV\n"); 2296 return -EINVAL; 2297 } 2298 2299 if (num_vfs) { 2300 ret = pci_enable_sriov(pdev, num_vfs); 2301 if (ret) 2302 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); 2303 else 2304 return num_vfs; 2305 } else if (!pci_vfs_assigned(pdev)) { 2306 pci_disable_sriov(pdev); 2307 } else { 2308 dev_warn(&pdev->dev, 2309 "Unable to free VFs because some are assigned to VMs.\n"); 2310 } 2311 2312 return 0; 2313 } 2314 2315 static void hns3_shutdown(struct pci_dev *pdev) 2316 { 2317 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2318 2319 hnae3_unregister_ae_dev(ae_dev); 2320 pci_set_drvdata(pdev, NULL); 2321 2322 if (system_state == SYSTEM_POWER_OFF) 2323 pci_set_power_state(pdev, PCI_D3hot); 2324 } 2325 2326 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, 2327 pci_channel_state_t state) 2328 { 2329 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2330 pci_ers_result_t ret; 2331 2332 dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); 2333 2334 if (state == pci_channel_io_perm_failure) 2335 return PCI_ERS_RESULT_DISCONNECT; 2336 2337 if (!ae_dev || !ae_dev->ops) { 2338 dev_err(&pdev->dev, 2339 "Can't recover - error happened before device initialized\n"); 2340 return PCI_ERS_RESULT_NONE; 2341 } 2342 2343 if (ae_dev->ops->handle_hw_ras_error) 2344 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); 2345 else 2346 return PCI_ERS_RESULT_NONE; 2347 2348 return ret; 2349 } 2350 2351 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) 2352 { 2353 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2354 const struct hnae3_ae_ops *ops; 2355 enum hnae3_reset_type reset_type; 2356 struct device *dev = &pdev->dev; 2357 2358 if (!ae_dev || !ae_dev->ops) 2359 return PCI_ERS_RESULT_NONE; 2360 2361 ops = ae_dev->ops; 2362 /* request the reset */ 2363 if (ops->reset_event && ops->get_reset_level && 2364 ops->set_default_reset_request) { 2365 if (ae_dev->hw_err_reset_req) { 2366 reset_type = ops->get_reset_level(ae_dev, 2367 &ae_dev->hw_err_reset_req); 2368 ops->set_default_reset_request(ae_dev, reset_type); 2369 dev_info(dev, "requesting reset due to PCI 
error\n"); 2370 ops->reset_event(pdev, NULL); 2371 } 2372 2373 return PCI_ERS_RESULT_RECOVERED; 2374 } 2375 2376 return PCI_ERS_RESULT_DISCONNECT; 2377 } 2378 2379 static void hns3_reset_prepare(struct pci_dev *pdev) 2380 { 2381 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2382 2383 dev_info(&pdev->dev, "FLR prepare\n"); 2384 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) 2385 ae_dev->ops->flr_prepare(ae_dev); 2386 } 2387 2388 static void hns3_reset_done(struct pci_dev *pdev) 2389 { 2390 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2391 2392 dev_info(&pdev->dev, "FLR done\n"); 2393 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) 2394 ae_dev->ops->flr_done(ae_dev); 2395 } 2396 2397 static const struct pci_error_handlers hns3_err_handler = { 2398 .error_detected = hns3_error_detected, 2399 .slot_reset = hns3_slot_reset, 2400 .reset_prepare = hns3_reset_prepare, 2401 .reset_done = hns3_reset_done, 2402 }; 2403 2404 static struct pci_driver hns3_driver = { 2405 .name = hns3_driver_name, 2406 .id_table = hns3_pci_tbl, 2407 .probe = hns3_probe, 2408 .remove = hns3_remove, 2409 .shutdown = hns3_shutdown, 2410 .sriov_configure = hns3_pci_sriov_configure, 2411 .err_handler = &hns3_err_handler, 2412 }; 2413 2414 /* set default feature to hns3 */ 2415 static void hns3_set_default_feature(struct net_device *netdev) 2416 { 2417 struct hnae3_handle *h = hns3_get_handle(netdev); 2418 struct pci_dev *pdev = h->pdev; 2419 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2420 2421 netdev->priv_flags |= IFF_UNICAST_FLT; 2422 2423 netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2424 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2425 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2426 NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; 2427 2428 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 2429 2430 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 2431 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 2432 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2433 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2434 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2435 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; 2436 2437 netdev->vlan_features |= NETIF_F_RXCSUM | 2438 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | 2439 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2440 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2441 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; 2442 2443 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 2444 NETIF_F_HW_VLAN_CTAG_RX | 2445 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2446 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 2447 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 2448 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; 2449 2450 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2451 netdev->hw_features |= NETIF_F_GRO_HW; 2452 netdev->features |= NETIF_F_GRO_HW; 2453 2454 if (!(h->flags & HNAE3_SUPPORT_VF)) { 2455 netdev->hw_features |= NETIF_F_NTUPLE; 2456 netdev->features |= NETIF_F_NTUPLE; 2457 } 2458 } 2459 2460 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) { 2461 netdev->hw_features |= NETIF_F_GSO_UDP_L4; 2462 netdev->features |= NETIF_F_GSO_UDP_L4; 2463 netdev->vlan_features |= NETIF_F_GSO_UDP_L4; 2464 netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4; 2465 } 2466 2467 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) { 2468 netdev->hw_features |= NETIF_F_HW_CSUM; 2469 netdev->features |= NETIF_F_HW_CSUM; 2470 netdev->vlan_features |= NETIF_F_HW_CSUM; 2471 
netdev->hw_enc_features |= NETIF_F_HW_CSUM; 2472 } else { 2473 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2474 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2475 netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2476 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2477 } 2478 2479 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) { 2480 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2481 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2482 netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2483 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2484 } 2485 2486 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { 2487 netdev->hw_features |= NETIF_F_HW_TC; 2488 netdev->features |= NETIF_F_HW_TC; 2489 } 2490 } 2491 2492 static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 2493 struct hns3_desc_cb *cb) 2494 { 2495 unsigned int order = hns3_page_order(ring); 2496 struct page *p; 2497 2498 p = dev_alloc_pages(order); 2499 if (!p) 2500 return -ENOMEM; 2501 2502 cb->priv = p; 2503 cb->page_offset = 0; 2504 cb->reuse_flag = 0; 2505 cb->buf = page_address(p); 2506 cb->length = hns3_page_size(ring); 2507 cb->type = DESC_TYPE_PAGE; 2508 page_ref_add(p, USHRT_MAX - 1); 2509 cb->pagecnt_bias = USHRT_MAX; 2510 2511 return 0; 2512 } 2513 2514 static void hns3_free_buffer(struct hns3_enet_ring *ring, 2515 struct hns3_desc_cb *cb, int budget) 2516 { 2517 if (cb->type == DESC_TYPE_SKB) 2518 napi_consume_skb(cb->priv, budget); 2519 else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias) 2520 __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); 2521 memset(cb, 0, sizeof(*cb)); 2522 } 2523 2524 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) 2525 { 2526 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 2527 cb->length, ring_to_dma_dir(ring)); 2528 2529 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) 2530 return -EIO; 2531 2532 return 0; 2533 } 2534 2535 static void hns3_unmap_buffer(struct hns3_enet_ring *ring, 2536 struct hns3_desc_cb *cb) 2537 { 2538 if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB) 2539 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 2540 ring_to_dma_dir(ring)); 2541 else if (cb->length) 2542 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 2543 ring_to_dma_dir(ring)); 2544 } 2545 2546 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) 2547 { 2548 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2549 ring->desc[i].addr = 0; 2550 } 2551 2552 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, 2553 int budget) 2554 { 2555 struct hns3_desc_cb *cb = &ring->desc_cb[i]; 2556 2557 if (!ring->desc_cb[i].dma) 2558 return; 2559 2560 hns3_buffer_detach(ring, i); 2561 hns3_free_buffer(ring, cb, budget); 2562 } 2563 2564 static void hns3_free_buffers(struct hns3_enet_ring *ring) 2565 { 2566 int i; 2567 2568 for (i = 0; i < ring->desc_num; i++) 2569 hns3_free_buffer_detach(ring, i, 0); 2570 } 2571 2572 /* free desc along with its attached buffer */ 2573 static void hns3_free_desc(struct hns3_enet_ring *ring) 2574 { 2575 int size = ring->desc_num * sizeof(ring->desc[0]); 2576 2577 hns3_free_buffers(ring); 2578 2579 if (ring->desc) { 2580 dma_free_coherent(ring_to_dev(ring), size, 2581 ring->desc, ring->desc_dma_addr); 2582 ring->desc = NULL; 2583 } 2584 } 2585 2586 static int hns3_alloc_desc(struct hns3_enet_ring *ring) 2587 { 2588 int size = ring->desc_num * sizeof(ring->desc[0]); 2589 2590 
ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
2591 &ring->desc_dma_addr, GFP_KERNEL);
2592 if (!ring->desc)
2593 return -ENOMEM;
2594
2595 return 0;
2596 }
2597
2598 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
2599 struct hns3_desc_cb *cb)
2600 {
2601 int ret;
2602
2603 ret = hns3_alloc_buffer(ring, cb);
2604 if (ret)
2605 goto out;
2606
2607 ret = hns3_map_buffer(ring, cb);
2608 if (ret)
2609 goto out_with_buf;
2610
2611 return 0;
2612
2613 out_with_buf:
2614 hns3_free_buffer(ring, cb, 0);
2615 out:
2616 return ret;
2617 }
2618
2619 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
2620 {
2621 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]);
2622
2623 if (ret)
2624 return ret;
2625
2626 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2627
2628 return 0;
2629 }
2630
2631 /* Allocate memory for the raw packet buffers and map them for DMA */
2632 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
2633 {
2634 int i, j, ret;
2635
2636 for (i = 0; i < ring->desc_num; i++) {
2637 ret = hns3_alloc_and_attach_buffer(ring, i);
2638 if (ret)
2639 goto out_buffer_fail;
2640 }
2641
2642 return 0;
2643
2644 out_buffer_fail:
2645 for (j = i - 1; j >= 0; j--)
2646 hns3_free_buffer_detach(ring, j, 0);
2647 return ret;
2648 }
2649
2650 /* detach an in-use buffer and replace it with a reserved one */
2651 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2652 struct hns3_desc_cb *res_cb)
2653 {
2654 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2655 ring->desc_cb[i] = *res_cb;
2656 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2657 ring->desc[i].rx.bd_base_info = 0;
2658 }
2659
2660 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2661 {
2662 ring->desc_cb[i].reuse_flag = 0;
2663 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
2664 ring->desc_cb[i].page_offset);
2665 ring->desc[i].rx.bd_base_info = 0;
2666
2667 dma_sync_single_for_device(ring_to_dev(ring),
2668 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset,
2669 hns3_buf_size(ring),
2670 DMA_FROM_DEVICE);
2671 }
2672
2673 static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
2674 int *bytes, int *pkts, int budget)
2675 {
2676 /* pair with the ring->last_to_use update in hns3_tx_doorbell();
2677 * smp_store_release() is not used in hns3_tx_doorbell() because
2678 * the doorbell operation already has the needed barrier.
2679 */
2680 int ltu = smp_load_acquire(&ring->last_to_use);
2681 int ntc = ring->next_to_clean;
2682 struct hns3_desc_cb *desc_cb;
2683 bool reclaimed = false;
2684 struct hns3_desc *desc;
2685
2686 while (ltu != ntc) {
2687 desc = &ring->desc[ntc];
2688
2689 if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
2690 BIT(HNS3_TXD_VLD_B))
2691 break;
2692
2693 desc_cb = &ring->desc_cb[ntc];
2694 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
2695 (*bytes) += desc_cb->length;
2696 /* desc_cb will be cleaned after hns3_free_buffer_detach */
2697 hns3_free_buffer_detach(ring, ntc, budget);
2698
2699 if (++ntc == ring->desc_num)
2700 ntc = 0;
2701
2702 /* Issue prefetch for next Tx descriptor */
2703 prefetch(&ring->desc_cb[ntc]);
2704 reclaimed = true;
2705 }
2706
2707 if (unlikely(!reclaimed))
2708 return false;
2709
2710 /* This smp_store_release() pairs with smp_load_acquire() in
2711 * ring_space called by hns3_nic_net_xmit.
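 * The release ordering guarantees that the desc_cb cleanup above is
 * visible before the new next_to_clean value.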
2712 */ 2713 smp_store_release(&ring->next_to_clean, ntc); 2714 return true; 2715 } 2716 2717 void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) 2718 { 2719 struct net_device *netdev = ring_to_netdev(ring); 2720 struct hns3_nic_priv *priv = netdev_priv(netdev); 2721 struct netdev_queue *dev_queue; 2722 int bytes, pkts; 2723 2724 bytes = 0; 2725 pkts = 0; 2726 2727 if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget))) 2728 return; 2729 2730 ring->tqp_vector->tx_group.total_bytes += bytes; 2731 ring->tqp_vector->tx_group.total_packets += pkts; 2732 2733 u64_stats_update_begin(&ring->syncp); 2734 ring->stats.tx_bytes += bytes; 2735 ring->stats.tx_pkts += pkts; 2736 u64_stats_update_end(&ring->syncp); 2737 2738 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 2739 netdev_tx_completed_queue(dev_queue, pkts, bytes); 2740 2741 if (unlikely(netif_carrier_ok(netdev) && 2742 ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { 2743 /* Make sure that anybody stopping the queue after this 2744 * sees the new next_to_clean. 2745 */ 2746 smp_mb(); 2747 if (netif_tx_queue_stopped(dev_queue) && 2748 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 2749 netif_tx_wake_queue(dev_queue); 2750 ring->stats.restart_queue++; 2751 } 2752 } 2753 } 2754 2755 static int hns3_desc_unused(struct hns3_enet_ring *ring) 2756 { 2757 int ntc = ring->next_to_clean; 2758 int ntu = ring->next_to_use; 2759 2760 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; 2761 } 2762 2763 static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, 2764 int cleand_count) 2765 { 2766 struct hns3_desc_cb *desc_cb; 2767 struct hns3_desc_cb res_cbs; 2768 int i, ret; 2769 2770 for (i = 0; i < cleand_count; i++) { 2771 desc_cb = &ring->desc_cb[ring->next_to_use]; 2772 if (desc_cb->reuse_flag) { 2773 u64_stats_update_begin(&ring->syncp); 2774 ring->stats.reuse_pg_cnt++; 2775 u64_stats_update_end(&ring->syncp); 2776 2777 hns3_reuse_buffer(ring, ring->next_to_use); 2778 } else { 2779 ret = hns3_alloc_and_map_buffer(ring, &res_cbs); 2780 if (ret) { 2781 u64_stats_update_begin(&ring->syncp); 2782 ring->stats.sw_err_cnt++; 2783 u64_stats_update_end(&ring->syncp); 2784 2785 hns3_rl_err(ring_to_netdev(ring), 2786 "alloc rx buffer failed: %d\n", 2787 ret); 2788 break; 2789 } 2790 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 2791 2792 u64_stats_update_begin(&ring->syncp); 2793 ring->stats.non_reuse_pg++; 2794 u64_stats_update_end(&ring->syncp); 2795 } 2796 2797 ring_ptr_move_fw(ring, next_to_use); 2798 } 2799 2800 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 2801 } 2802 2803 static bool hns3_page_is_reusable(struct page *page) 2804 { 2805 return page_to_nid(page) == numa_mem_id() && 2806 !page_is_pfmemalloc(page); 2807 } 2808 2809 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) 2810 { 2811 return (page_count(cb->priv) - cb->pagecnt_bias) == 1; 2812 } 2813 2814 static void hns3_nic_reuse_page(struct sk_buff *skb, int i, 2815 struct hns3_enet_ring *ring, int pull_len, 2816 struct hns3_desc_cb *desc_cb) 2817 { 2818 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 2819 int size = le16_to_cpu(desc->rx.size); 2820 u32 truesize = hns3_buf_size(ring); 2821 2822 desc_cb->pagecnt_bias--; 2823 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 2824 size - pull_len, truesize); 2825 2826 /* Avoid re-using remote pages, or the stack is still using the page 2827 * when page_offset rollback to zero, flag default unreuse 2828 */ 2829 if 
(unlikely(!hns3_page_is_reusable(desc_cb->priv)) || 2830 (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) { 2831 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); 2832 return; 2833 } 2834 2835 /* Move offset up to the next cache line */ 2836 desc_cb->page_offset += truesize; 2837 2838 if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) { 2839 desc_cb->reuse_flag = 1; 2840 } else if (hns3_can_reuse_page(desc_cb)) { 2841 desc_cb->reuse_flag = 1; 2842 desc_cb->page_offset = 0; 2843 } else if (desc_cb->pagecnt_bias) { 2844 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); 2845 return; 2846 } 2847 2848 if (unlikely(!desc_cb->pagecnt_bias)) { 2849 page_ref_add(desc_cb->priv, USHRT_MAX); 2850 desc_cb->pagecnt_bias = USHRT_MAX; 2851 } 2852 } 2853 2854 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) 2855 { 2856 __be16 type = skb->protocol; 2857 struct tcphdr *th; 2858 int depth = 0; 2859 2860 while (eth_type_vlan(type)) { 2861 struct vlan_hdr *vh; 2862 2863 if ((depth + VLAN_HLEN) > skb_headlen(skb)) 2864 return -EFAULT; 2865 2866 vh = (struct vlan_hdr *)(skb->data + depth); 2867 type = vh->h_vlan_encapsulated_proto; 2868 depth += VLAN_HLEN; 2869 } 2870 2871 skb_set_network_header(skb, depth); 2872 2873 if (type == htons(ETH_P_IP)) { 2874 const struct iphdr *iph = ip_hdr(skb); 2875 2876 depth += sizeof(struct iphdr); 2877 skb_set_transport_header(skb, depth); 2878 th = tcp_hdr(skb); 2879 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, 2880 iph->daddr, 0); 2881 } else if (type == htons(ETH_P_IPV6)) { 2882 const struct ipv6hdr *iph = ipv6_hdr(skb); 2883 2884 depth += sizeof(struct ipv6hdr); 2885 skb_set_transport_header(skb, depth); 2886 th = tcp_hdr(skb); 2887 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, 2888 &iph->daddr, 0); 2889 } else { 2890 hns3_rl_err(skb->dev, 2891 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", 2892 be16_to_cpu(type), depth); 2893 return -EFAULT; 2894 } 2895 2896 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 2897 if (th->cwr) 2898 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 2899 2900 if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) 2901 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; 2902 2903 skb->csum_start = (unsigned char *)th - skb->head; 2904 skb->csum_offset = offsetof(struct tcphdr, check); 2905 skb->ip_summed = CHECKSUM_PARTIAL; 2906 2907 trace_hns3_gro(skb); 2908 2909 return 0; 2910 } 2911 2912 static void hns3_checksum_complete(struct hns3_enet_ring *ring, 2913 struct sk_buff *skb, u32 l234info) 2914 { 2915 u32 lo, hi; 2916 2917 u64_stats_update_begin(&ring->syncp); 2918 ring->stats.csum_complete++; 2919 u64_stats_update_end(&ring->syncp); 2920 skb->ip_summed = CHECKSUM_COMPLETE; 2921 lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M, 2922 HNS3_RXD_L2_CSUM_L_S); 2923 hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M, 2924 HNS3_RXD_L2_CSUM_H_S); 2925 skb->csum = csum_unfold((__force __sum16)(lo | hi << 8)); 2926 } 2927 2928 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, 2929 u32 l234info, u32 bd_base_info, u32 ol_info) 2930 { 2931 struct net_device *netdev = ring_to_netdev(ring); 2932 int l3_type, l4_type; 2933 int ol4_type; 2934 2935 skb->ip_summed = CHECKSUM_NONE; 2936 2937 skb_checksum_none_assert(skb); 2938 2939 if (!(netdev->features & NETIF_F_RXCSUM)) 2940 return; 2941 2942 if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) { 2943 hns3_checksum_complete(ring, skb, l234info); 2944 return; 2945 } 2946 2947 /* check if hardware has done 
checksum */
2948 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
2949 return;
2950
2951 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
2952 BIT(HNS3_RXD_OL3E_B) |
2953 BIT(HNS3_RXD_OL4E_B)))) {
2954 u64_stats_update_begin(&ring->syncp);
2955 ring->stats.l3l4_csum_err++;
2956 u64_stats_update_end(&ring->syncp);
2957
2958 return;
2959 }
2960
2961 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
2962 HNS3_RXD_OL4ID_S);
2963 switch (ol4_type) {
2964 case HNS3_OL4_TYPE_MAC_IN_UDP:
2965 case HNS3_OL4_TYPE_NVGRE:
2966 skb->csum_level = 1;
2967 fallthrough;
2968 case HNS3_OL4_TYPE_NO_TUN:
2969 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2970 HNS3_RXD_L3ID_S);
2971 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2972 HNS3_RXD_L4ID_S);
2973
2974 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2975 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2976 l3_type == HNS3_L3_TYPE_IPV6) &&
2977 (l4_type == HNS3_L4_TYPE_UDP ||
2978 l4_type == HNS3_L4_TYPE_TCP ||
2979 l4_type == HNS3_L4_TYPE_SCTP))
2980 skb->ip_summed = CHECKSUM_UNNECESSARY;
2981 break;
2982 default:
2983 break;
2984 }
2985 }
2986
2987 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2988 {
2989 if (skb_has_frag_list(skb))
2990 napi_gro_flush(&ring->tqp_vector->napi, false);
2991
2992 napi_gro_receive(&ring->tqp_vector->napi, skb);
2993 }
2994
2995 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2996 struct hns3_desc *desc, u32 l234info,
2997 u16 *vlan_tag)
2998 {
2999 struct hnae3_handle *handle = ring->tqp->handle;
3000 struct pci_dev *pdev = ring->tqp->handle->pdev;
3001 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3002
3003 if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
3004 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
3005 if (!(*vlan_tag & VLAN_VID_MASK))
3006 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
3007
3008 return (*vlan_tag != 0);
3009 }
3010
3011 #define HNS3_STRP_OUTER_VLAN 0x1
3012 #define HNS3_STRP_INNER_VLAN 0x2
3013 #define HNS3_STRP_BOTH 0x3
3014
3015 /* The hardware always inserts the VLAN tag into the RX descriptor
3016 * when it strips the tag from the packet, so the driver needs to
3017 * determine which tag to report to the stack.
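 * The HNS3_RXD_STRP_TAGP field decoded below indicates which tag(s)
 * the hardware has stripped.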
3018 */ 3019 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, 3020 HNS3_RXD_STRP_TAGP_S)) { 3021 case HNS3_STRP_OUTER_VLAN: 3022 if (handle->port_base_vlan_state != 3023 HNAE3_PORT_BASE_VLAN_DISABLE) 3024 return false; 3025 3026 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 3027 return true; 3028 case HNS3_STRP_INNER_VLAN: 3029 if (handle->port_base_vlan_state != 3030 HNAE3_PORT_BASE_VLAN_DISABLE) 3031 return false; 3032 3033 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 3034 return true; 3035 case HNS3_STRP_BOTH: 3036 if (handle->port_base_vlan_state == 3037 HNAE3_PORT_BASE_VLAN_DISABLE) 3038 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 3039 else 3040 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 3041 3042 return true; 3043 default: 3044 return false; 3045 } 3046 } 3047 3048 static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) 3049 { 3050 ring->desc[ring->next_to_clean].rx.bd_base_info &= 3051 cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); 3052 ring->next_to_clean += 1; 3053 3054 if (unlikely(ring->next_to_clean == ring->desc_num)) 3055 ring->next_to_clean = 0; 3056 } 3057 3058 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, 3059 unsigned char *va) 3060 { 3061 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; 3062 struct net_device *netdev = ring_to_netdev(ring); 3063 struct sk_buff *skb; 3064 3065 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); 3066 skb = ring->skb; 3067 if (unlikely(!skb)) { 3068 hns3_rl_err(netdev, "alloc rx skb fail\n"); 3069 3070 u64_stats_update_begin(&ring->syncp); 3071 ring->stats.sw_err_cnt++; 3072 u64_stats_update_end(&ring->syncp); 3073 3074 return -ENOMEM; 3075 } 3076 3077 trace_hns3_rx_desc(ring); 3078 prefetchw(skb->data); 3079 3080 ring->pending_buf = 1; 3081 ring->frag_num = 0; 3082 ring->tail_skb = NULL; 3083 if (length <= HNS3_RX_HEAD_SIZE) { 3084 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 3085 3086 /* We can reuse buffer as-is, just make sure it is local */ 3087 if (likely(hns3_page_is_reusable(desc_cb->priv))) 3088 desc_cb->reuse_flag = 1; 3089 else /* This page cannot be reused so discard it */ 3090 __page_frag_cache_drain(desc_cb->priv, 3091 desc_cb->pagecnt_bias); 3092 3093 hns3_rx_ring_move_fw(ring); 3094 return 0; 3095 } 3096 u64_stats_update_begin(&ring->syncp); 3097 ring->stats.seg_pkt_cnt++; 3098 u64_stats_update_end(&ring->syncp); 3099 3100 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); 3101 __skb_put(skb, ring->pull_len); 3102 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, 3103 desc_cb); 3104 hns3_rx_ring_move_fw(ring); 3105 3106 return 0; 3107 } 3108 3109 static int hns3_add_frag(struct hns3_enet_ring *ring) 3110 { 3111 struct sk_buff *skb = ring->skb; 3112 struct sk_buff *head_skb = skb; 3113 struct sk_buff *new_skb; 3114 struct hns3_desc_cb *desc_cb; 3115 struct hns3_desc *desc; 3116 u32 bd_base_info; 3117 3118 do { 3119 desc = &ring->desc[ring->next_to_clean]; 3120 desc_cb = &ring->desc_cb[ring->next_to_clean]; 3121 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3122 /* make sure HW write desc complete */ 3123 dma_rmb(); 3124 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) 3125 return -ENXIO; 3126 3127 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { 3128 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); 3129 if (unlikely(!new_skb)) { 3130 hns3_rl_err(ring_to_netdev(ring), 3131 "alloc rx fraglist skb fail\n"); 3132 return -ENXIO; 3133 } 3134 ring->frag_num = 0; 3135 3136 if (ring->tail_skb) { 3137 
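/* chain the new skb after the current fraglist tail */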
ring->tail_skb->next = new_skb; 3138 ring->tail_skb = new_skb; 3139 } else { 3140 skb_shinfo(skb)->frag_list = new_skb; 3141 ring->tail_skb = new_skb; 3142 } 3143 } 3144 3145 if (ring->tail_skb) { 3146 head_skb->truesize += hns3_buf_size(ring); 3147 head_skb->data_len += le16_to_cpu(desc->rx.size); 3148 head_skb->len += le16_to_cpu(desc->rx.size); 3149 skb = ring->tail_skb; 3150 } 3151 3152 dma_sync_single_for_cpu(ring_to_dev(ring), 3153 desc_cb->dma + desc_cb->page_offset, 3154 hns3_buf_size(ring), 3155 DMA_FROM_DEVICE); 3156 3157 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); 3158 trace_hns3_rx_desc(ring); 3159 hns3_rx_ring_move_fw(ring); 3160 ring->pending_buf++; 3161 } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); 3162 3163 return 0; 3164 } 3165 3166 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, 3167 struct sk_buff *skb, u32 l234info, 3168 u32 bd_base_info, u32 ol_info) 3169 { 3170 u32 l3_type; 3171 3172 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, 3173 HNS3_RXD_GRO_SIZE_M, 3174 HNS3_RXD_GRO_SIZE_S); 3175 /* if there is no HW GRO, do not set gro params */ 3176 if (!skb_shinfo(skb)->gso_size) { 3177 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info); 3178 return 0; 3179 } 3180 3181 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, 3182 HNS3_RXD_GRO_COUNT_M, 3183 HNS3_RXD_GRO_COUNT_S); 3184 3185 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); 3186 if (l3_type == HNS3_L3_TYPE_IPV4) 3187 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 3188 else if (l3_type == HNS3_L3_TYPE_IPV6) 3189 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 3190 else 3191 return -EFAULT; 3192 3193 return hns3_gro_complete(skb, l234info); 3194 } 3195 3196 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, 3197 struct sk_buff *skb, u32 rss_hash) 3198 { 3199 struct hnae3_handle *handle = ring->tqp->handle; 3200 enum pkt_hash_types rss_type; 3201 3202 if (rss_hash) 3203 rss_type = handle->kinfo.rss_type; 3204 else 3205 rss_type = PKT_HASH_TYPE_NONE; 3206 3207 skb_set_hash(skb, rss_hash, rss_type); 3208 } 3209 3210 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) 3211 { 3212 struct net_device *netdev = ring_to_netdev(ring); 3213 enum hns3_pkt_l2t_type l2_frame_type; 3214 u32 bd_base_info, l234info, ol_info; 3215 struct hns3_desc *desc; 3216 unsigned int len; 3217 int pre_ntc, ret; 3218 3219 /* bdinfo handled below is only valid on the last BD of the 3220 * current packet, and ring->next_to_clean indicates the first 3221 * descriptor of next packet, so need - 1 below. 3222 */ 3223 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : 3224 (ring->desc_num - 1); 3225 desc = &ring->desc[pre_ntc]; 3226 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3227 l234info = le32_to_cpu(desc->rx.l234_info); 3228 ol_info = le32_to_cpu(desc->rx.ol_info); 3229 3230 /* Based on hw strategy, the tag offloaded will be stored at 3231 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 3232 * in one layer tag case. 
3233 */ 3234 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 3235 u16 vlan_tag; 3236 3237 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) 3238 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3239 vlan_tag); 3240 } 3241 3242 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | 3243 BIT(HNS3_RXD_L2E_B))))) { 3244 u64_stats_update_begin(&ring->syncp); 3245 if (l234info & BIT(HNS3_RXD_L2E_B)) 3246 ring->stats.l2_err++; 3247 else 3248 ring->stats.err_pkt_len++; 3249 u64_stats_update_end(&ring->syncp); 3250 3251 return -EFAULT; 3252 } 3253 3254 len = skb->len; 3255 3256 /* Do update ip stack process */ 3257 skb->protocol = eth_type_trans(skb, netdev); 3258 3259 /* This is needed in order to enable forwarding support */ 3260 ret = hns3_set_gro_and_checksum(ring, skb, l234info, 3261 bd_base_info, ol_info); 3262 if (unlikely(ret)) { 3263 u64_stats_update_begin(&ring->syncp); 3264 ring->stats.rx_err_cnt++; 3265 u64_stats_update_end(&ring->syncp); 3266 return ret; 3267 } 3268 3269 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, 3270 HNS3_RXD_DMAC_S); 3271 3272 u64_stats_update_begin(&ring->syncp); 3273 ring->stats.rx_pkts++; 3274 ring->stats.rx_bytes += len; 3275 3276 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) 3277 ring->stats.rx_multicast++; 3278 3279 u64_stats_update_end(&ring->syncp); 3280 3281 ring->tqp_vector->rx_group.total_bytes += len; 3282 3283 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); 3284 return 0; 3285 } 3286 3287 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) 3288 { 3289 struct sk_buff *skb = ring->skb; 3290 struct hns3_desc_cb *desc_cb; 3291 struct hns3_desc *desc; 3292 unsigned int length; 3293 u32 bd_base_info; 3294 int ret; 3295 3296 desc = &ring->desc[ring->next_to_clean]; 3297 desc_cb = &ring->desc_cb[ring->next_to_clean]; 3298 3299 prefetch(desc); 3300 3301 if (!skb) { 3302 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3303 3304 /* Check valid BD */ 3305 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) 3306 return -ENXIO; 3307 3308 dma_rmb(); 3309 length = le16_to_cpu(desc->rx.size); 3310 3311 ring->va = desc_cb->buf + desc_cb->page_offset; 3312 3313 dma_sync_single_for_cpu(ring_to_dev(ring), 3314 desc_cb->dma + desc_cb->page_offset, 3315 hns3_buf_size(ring), 3316 DMA_FROM_DEVICE); 3317 3318 /* Prefetch first cache line of first page. 3319 * Idea is to cache few bytes of the header of the packet. 3320 * Our L1 Cache line size is 64B so need to prefetch twice to make 3321 * it 128B. But in actual we can have greater size of caches with 3322 * 128B Level 1 cache lines. In such a case, single fetch would 3323 * suffice to cache in the relevant part of the header. 
3324 */
3325 net_prefetch(ring->va);
3326
3327 ret = hns3_alloc_skb(ring, length, ring->va);
3328 skb = ring->skb;
3329
3330 if (ret < 0) /* alloc buffer fail */
3331 return ret;
3332 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need to add frag */
3333 ret = hns3_add_frag(ring);
3334 if (ret)
3335 return ret;
3336 }
3337 } else {
3338 ret = hns3_add_frag(ring);
3339 if (ret)
3340 return ret;
3341 }
3342
3343 /* As the head data may be changed when GRO is enabled, copy
3344 * the head data in after the rest of the packet has been received
3345 */
3346 if (skb->len > HNS3_RX_HEAD_SIZE)
3347 memcpy(skb->data, ring->va,
3348 ALIGN(ring->pull_len, sizeof(long)));
3349
3350 ret = hns3_handle_bdinfo(ring, skb);
3351 if (unlikely(ret)) {
3352 dev_kfree_skb_any(skb);
3353 return ret;
3354 }
3355
3356 skb_record_rx_queue(skb, ring->tqp->tqp_index);
3357 return 0;
3358 }
3359
3360 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
3361 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
3362 {
3363 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
3364 int unused_count = hns3_desc_unused(ring);
3365 int recv_pkts = 0;
3366 int err;
3367
3368 unused_count -= ring->pending_buf;
3369
3370 while (recv_pkts < budget) {
3371 /* Reuse or realloc buffers */
3372 if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
3373 hns3_nic_alloc_rx_buffers(ring, unused_count);
3374 unused_count = hns3_desc_unused(ring) -
3375 ring->pending_buf;
3376 }
3377
3378 /* Poll one pkt */
3379 err = hns3_handle_rx_bd(ring);
3380 /* no FE seen for the packet yet, or skb allocation failed */
3381 if (unlikely(!ring->skb || err == -ENXIO)) {
3382 goto out;
3383 } else if (likely(!err)) {
3384 rx_fn(ring, ring->skb);
3385 recv_pkts++;
3386 }
3387
3388 unused_count += ring->pending_buf;
3389 ring->skb = NULL;
3390 ring->pending_buf = 0;
3391 }
3392
3393 out:
3394 /* Make sure all data has been written before submitting */
3395 if (unused_count > 0)
3396 hns3_nic_alloc_rx_buffers(ring, unused_count);
3397
3398 return recv_pkts;
3399 }
3400
3401 static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
3402 {
3403 #define HNS3_RX_LOW_BYTE_RATE 10000
3404 #define HNS3_RX_MID_BYTE_RATE 20000
3405 #define HNS3_RX_ULTRA_PACKET_RATE 40
3406
3407 enum hns3_flow_level_range new_flow_level;
3408 struct hns3_enet_tqp_vector *tqp_vector;
3409 int packets_per_msecs, bytes_per_msecs;
3410 u32 time_passed_ms;
3411
3412 tqp_vector = ring_group->ring->tqp_vector;
3413 time_passed_ms =
3414 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
3415 if (!time_passed_ms)
3416 return false;
3417
3418 do_div(ring_group->total_packets, time_passed_ms);
3419 packets_per_msecs = ring_group->total_packets;
3420
3421 do_div(ring_group->total_bytes, time_passed_ms);
3422 bytes_per_msecs = ring_group->total_bytes;
3423
3424 new_flow_level = ring_group->coal.flow_level;
3425
3426 /* Simple throttle rate management
3427 * 0-10MB/s lower (50000 ints/s)
3428 * 10-20MB/s middle (20000 ints/s)
3429 * 20-1249MB/s high (18000 ints/s)
3430 * > 40000pps ultra (8000 ints/s)
3431 */
3432 switch (new_flow_level) {
3433 case HNS3_FLOW_LOW:
3434 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
3435 new_flow_level = HNS3_FLOW_MID;
3436 break;
3437 case HNS3_FLOW_MID:
3438 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
3439 new_flow_level = HNS3_FLOW_HIGH;
3440 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
3441 new_flow_level = HNS3_FLOW_LOW;
3442 break;
3443 case HNS3_FLOW_HIGH:
3444 case HNS3_FLOW_ULTRA:
3445 default:
3446 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
3447 new_flow_level =
HNS3_FLOW_MID; 3448 break; 3449 } 3450 3451 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && 3452 &tqp_vector->rx_group == ring_group) 3453 new_flow_level = HNS3_FLOW_ULTRA; 3454 3455 ring_group->total_bytes = 0; 3456 ring_group->total_packets = 0; 3457 ring_group->coal.flow_level = new_flow_level; 3458 3459 return true; 3460 } 3461 3462 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) 3463 { 3464 struct hns3_enet_tqp_vector *tqp_vector; 3465 u16 new_int_gl; 3466 3467 if (!ring_group->ring) 3468 return false; 3469 3470 tqp_vector = ring_group->ring->tqp_vector; 3471 if (!tqp_vector->last_jiffies) 3472 return false; 3473 3474 if (ring_group->total_packets == 0) { 3475 ring_group->coal.int_gl = HNS3_INT_GL_50K; 3476 ring_group->coal.flow_level = HNS3_FLOW_LOW; 3477 return true; 3478 } 3479 3480 if (!hns3_get_new_flow_lvl(ring_group)) 3481 return false; 3482 3483 new_int_gl = ring_group->coal.int_gl; 3484 switch (ring_group->coal.flow_level) { 3485 case HNS3_FLOW_LOW: 3486 new_int_gl = HNS3_INT_GL_50K; 3487 break; 3488 case HNS3_FLOW_MID: 3489 new_int_gl = HNS3_INT_GL_20K; 3490 break; 3491 case HNS3_FLOW_HIGH: 3492 new_int_gl = HNS3_INT_GL_18K; 3493 break; 3494 case HNS3_FLOW_ULTRA: 3495 new_int_gl = HNS3_INT_GL_8K; 3496 break; 3497 default: 3498 break; 3499 } 3500 3501 if (new_int_gl != ring_group->coal.int_gl) { 3502 ring_group->coal.int_gl = new_int_gl; 3503 return true; 3504 } 3505 return false; 3506 } 3507 3508 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) 3509 { 3510 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 3511 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 3512 bool rx_update, tx_update; 3513 3514 /* update param every 1000ms */ 3515 if (time_before(jiffies, 3516 tqp_vector->last_jiffies + msecs_to_jiffies(1000))) 3517 return; 3518 3519 if (rx_group->coal.adapt_enable) { 3520 rx_update = hns3_get_new_int_gl(rx_group); 3521 if (rx_update) 3522 hns3_set_vector_coalesce_rx_gl(tqp_vector, 3523 rx_group->coal.int_gl); 3524 } 3525 3526 if (tx_group->coal.adapt_enable) { 3527 tx_update = hns3_get_new_int_gl(tx_group); 3528 if (tx_update) 3529 hns3_set_vector_coalesce_tx_gl(tqp_vector, 3530 tx_group->coal.int_gl); 3531 } 3532 3533 tqp_vector->last_jiffies = jiffies; 3534 } 3535 3536 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 3537 { 3538 struct hns3_nic_priv *priv = netdev_priv(napi->dev); 3539 struct hns3_enet_ring *ring; 3540 int rx_pkt_total = 0; 3541 3542 struct hns3_enet_tqp_vector *tqp_vector = 3543 container_of(napi, struct hns3_enet_tqp_vector, napi); 3544 bool clean_complete = true; 3545 int rx_budget = budget; 3546 3547 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3548 napi_complete(napi); 3549 return 0; 3550 } 3551 3552 /* Since the actual Tx work is minimal, we can give the Tx a larger 3553 * budget and be more aggressive about cleaning up the Tx descriptors. 
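 * Each TX ring below is therefore cleaned with the full NAPI budget.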
3554 */ 3555 hns3_for_each_ring(ring, tqp_vector->tx_group) 3556 hns3_clean_tx_ring(ring, budget); 3557 3558 /* make sure rx ring budget not smaller than 1 */ 3559 if (tqp_vector->num_tqps > 1) 3560 rx_budget = max(budget / tqp_vector->num_tqps, 1); 3561 3562 hns3_for_each_ring(ring, tqp_vector->rx_group) { 3563 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 3564 hns3_rx_skb); 3565 3566 if (rx_cleaned >= rx_budget) 3567 clean_complete = false; 3568 3569 rx_pkt_total += rx_cleaned; 3570 } 3571 3572 tqp_vector->rx_group.total_packets += rx_pkt_total; 3573 3574 if (!clean_complete) 3575 return budget; 3576 3577 if (napi_complete(napi) && 3578 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 3579 hns3_update_new_int_gl(tqp_vector); 3580 hns3_mask_vector_irq(tqp_vector, 1); 3581 } 3582 3583 return rx_pkt_total; 3584 } 3585 3586 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3587 struct hnae3_ring_chain_node *head) 3588 { 3589 struct pci_dev *pdev = tqp_vector->handle->pdev; 3590 struct hnae3_ring_chain_node *cur_chain = head; 3591 struct hnae3_ring_chain_node *chain; 3592 struct hns3_enet_ring *tx_ring; 3593 struct hns3_enet_ring *rx_ring; 3594 3595 tx_ring = tqp_vector->tx_group.ring; 3596 if (tx_ring) { 3597 cur_chain->tqp_index = tx_ring->tqp->tqp_index; 3598 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3599 HNAE3_RING_TYPE_TX); 3600 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3601 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); 3602 3603 cur_chain->next = NULL; 3604 3605 while (tx_ring->next) { 3606 tx_ring = tx_ring->next; 3607 3608 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), 3609 GFP_KERNEL); 3610 if (!chain) 3611 goto err_free_chain; 3612 3613 cur_chain->next = chain; 3614 chain->tqp_index = tx_ring->tqp->tqp_index; 3615 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3616 HNAE3_RING_TYPE_TX); 3617 hnae3_set_field(chain->int_gl_idx, 3618 HNAE3_RING_GL_IDX_M, 3619 HNAE3_RING_GL_IDX_S, 3620 HNAE3_RING_GL_TX); 3621 3622 cur_chain = chain; 3623 } 3624 } 3625 3626 rx_ring = tqp_vector->rx_group.ring; 3627 if (!tx_ring && rx_ring) { 3628 cur_chain->next = NULL; 3629 cur_chain->tqp_index = rx_ring->tqp->tqp_index; 3630 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 3631 HNAE3_RING_TYPE_RX); 3632 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3633 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3634 3635 rx_ring = rx_ring->next; 3636 } 3637 3638 while (rx_ring) { 3639 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 3640 if (!chain) 3641 goto err_free_chain; 3642 3643 cur_chain->next = chain; 3644 chain->tqp_index = rx_ring->tqp->tqp_index; 3645 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 3646 HNAE3_RING_TYPE_RX); 3647 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 3648 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 3649 3650 cur_chain = chain; 3651 3652 rx_ring = rx_ring->next; 3653 } 3654 3655 return 0; 3656 3657 err_free_chain: 3658 cur_chain = head->next; 3659 while (cur_chain) { 3660 chain = cur_chain->next; 3661 devm_kfree(&pdev->dev, cur_chain); 3662 cur_chain = chain; 3663 } 3664 head->next = NULL; 3665 3666 return -ENOMEM; 3667 } 3668 3669 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 3670 struct hnae3_ring_chain_node *head) 3671 { 3672 struct pci_dev *pdev = tqp_vector->handle->pdev; 3673 struct hnae3_ring_chain_node *chain_tmp, *chain; 3674 3675 chain = head->next; 3676 3677 while (chain) { 3678 chain_tmp = chain->next; 3679 devm_kfree(&pdev->dev, chain); 3680 chain 
= chain_tmp; 3681 } 3682 } 3683 3684 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 3685 struct hns3_enet_ring *ring) 3686 { 3687 ring->next = group->ring; 3688 group->ring = ring; 3689 3690 group->count++; 3691 } 3692 3693 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) 3694 { 3695 struct pci_dev *pdev = priv->ae_handle->pdev; 3696 struct hns3_enet_tqp_vector *tqp_vector; 3697 int num_vectors = priv->vector_num; 3698 int numa_node; 3699 int vector_i; 3700 3701 numa_node = dev_to_node(&pdev->dev); 3702 3703 for (vector_i = 0; vector_i < num_vectors; vector_i++) { 3704 tqp_vector = &priv->tqp_vector[vector_i]; 3705 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), 3706 &tqp_vector->affinity_mask); 3707 } 3708 } 3709 3710 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 3711 { 3712 struct hnae3_ring_chain_node vector_ring_chain; 3713 struct hnae3_handle *h = priv->ae_handle; 3714 struct hns3_enet_tqp_vector *tqp_vector; 3715 int ret; 3716 int i; 3717 3718 hns3_nic_set_cpumask(priv); 3719 3720 for (i = 0; i < priv->vector_num; i++) { 3721 tqp_vector = &priv->tqp_vector[i]; 3722 hns3_vector_coalesce_init_hw(tqp_vector, priv); 3723 tqp_vector->num_tqps = 0; 3724 } 3725 3726 for (i = 0; i < h->kinfo.num_tqps; i++) { 3727 u16 vector_i = i % priv->vector_num; 3728 u16 tqp_num = h->kinfo.num_tqps; 3729 3730 tqp_vector = &priv->tqp_vector[vector_i]; 3731 3732 hns3_add_ring_to_group(&tqp_vector->tx_group, 3733 &priv->ring[i]); 3734 3735 hns3_add_ring_to_group(&tqp_vector->rx_group, 3736 &priv->ring[i + tqp_num]); 3737 3738 priv->ring[i].tqp_vector = tqp_vector; 3739 priv->ring[i + tqp_num].tqp_vector = tqp_vector; 3740 tqp_vector->num_tqps++; 3741 } 3742 3743 for (i = 0; i < priv->vector_num; i++) { 3744 tqp_vector = &priv->tqp_vector[i]; 3745 3746 tqp_vector->rx_group.total_bytes = 0; 3747 tqp_vector->rx_group.total_packets = 0; 3748 tqp_vector->tx_group.total_bytes = 0; 3749 tqp_vector->tx_group.total_packets = 0; 3750 tqp_vector->handle = h; 3751 3752 ret = hns3_get_vector_ring_chain(tqp_vector, 3753 &vector_ring_chain); 3754 if (ret) 3755 goto map_ring_fail; 3756 3757 ret = h->ae_algo->ops->map_ring_to_vector(h, 3758 tqp_vector->vector_irq, &vector_ring_chain); 3759 3760 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3761 3762 if (ret) 3763 goto map_ring_fail; 3764 3765 netif_napi_add(priv->netdev, &tqp_vector->napi, 3766 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 3767 } 3768 3769 return 0; 3770 3771 map_ring_fail: 3772 while (i--) 3773 netif_napi_del(&priv->tqp_vector[i].napi); 3774 3775 return ret; 3776 } 3777 3778 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 3779 { 3780 struct hnae3_handle *h = priv->ae_handle; 3781 struct hns3_enet_tqp_vector *tqp_vector; 3782 struct hnae3_vector_info *vector; 3783 struct pci_dev *pdev = h->pdev; 3784 u16 tqp_num = h->kinfo.num_tqps; 3785 u16 vector_num; 3786 int ret = 0; 3787 u16 i; 3788 3789 /* RSS size, cpu online and vector_num should be the same */ 3790 /* Should consider 2p/4p later */ 3791 vector_num = min_t(u16, num_online_cpus(), tqp_num); 3792 3793 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), 3794 GFP_KERNEL); 3795 if (!vector) 3796 return -ENOMEM; 3797 3798 /* save the actual available vector number */ 3799 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 3800 3801 priv->vector_num = vector_num; 3802 priv->tqp_vector = (struct hns3_enet_tqp_vector *) 3803 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 
3804 GFP_KERNEL); 3805 if (!priv->tqp_vector) { 3806 ret = -ENOMEM; 3807 goto out; 3808 } 3809 3810 for (i = 0; i < priv->vector_num; i++) { 3811 tqp_vector = &priv->tqp_vector[i]; 3812 tqp_vector->idx = i; 3813 tqp_vector->mask_addr = vector[i].io_addr; 3814 tqp_vector->vector_irq = vector[i].vector; 3815 hns3_vector_coalesce_init(tqp_vector, priv); 3816 } 3817 3818 out: 3819 devm_kfree(&pdev->dev, vector); 3820 return ret; 3821 } 3822 3823 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) 3824 { 3825 group->ring = NULL; 3826 group->count = 0; 3827 } 3828 3829 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 3830 { 3831 struct hnae3_ring_chain_node vector_ring_chain; 3832 struct hnae3_handle *h = priv->ae_handle; 3833 struct hns3_enet_tqp_vector *tqp_vector; 3834 int i; 3835 3836 for (i = 0; i < priv->vector_num; i++) { 3837 tqp_vector = &priv->tqp_vector[i]; 3838 3839 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) 3840 continue; 3841 3842 /* Since the mapping can be overwritten, when fail to get the 3843 * chain between vector and ring, we should go on to deal with 3844 * the remaining options. 3845 */ 3846 if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) 3847 dev_warn(priv->dev, "failed to get ring chain\n"); 3848 3849 h->ae_algo->ops->unmap_ring_from_vector(h, 3850 tqp_vector->vector_irq, &vector_ring_chain); 3851 3852 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 3853 3854 hns3_clear_ring_group(&tqp_vector->rx_group); 3855 hns3_clear_ring_group(&tqp_vector->tx_group); 3856 netif_napi_del(&priv->tqp_vector[i].napi); 3857 } 3858 } 3859 3860 static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 3861 { 3862 struct hnae3_handle *h = priv->ae_handle; 3863 struct pci_dev *pdev = h->pdev; 3864 int i, ret; 3865 3866 for (i = 0; i < priv->vector_num; i++) { 3867 struct hns3_enet_tqp_vector *tqp_vector; 3868 3869 tqp_vector = &priv->tqp_vector[i]; 3870 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 3871 if (ret) 3872 return; 3873 } 3874 3875 devm_kfree(&pdev->dev, priv->tqp_vector); 3876 } 3877 3878 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 3879 unsigned int ring_type) 3880 { 3881 int queue_num = priv->ae_handle->kinfo.num_tqps; 3882 struct hns3_enet_ring *ring; 3883 int desc_num; 3884 3885 if (ring_type == HNAE3_RING_TYPE_TX) { 3886 ring = &priv->ring[q->tqp_index]; 3887 desc_num = priv->ae_handle->kinfo.num_tx_desc; 3888 ring->queue_index = q->tqp_index; 3889 } else { 3890 ring = &priv->ring[q->tqp_index + queue_num]; 3891 desc_num = priv->ae_handle->kinfo.num_rx_desc; 3892 ring->queue_index = q->tqp_index; 3893 } 3894 3895 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 3896 3897 ring->tqp = q; 3898 ring->desc = NULL; 3899 ring->desc_cb = NULL; 3900 ring->dev = priv->dev; 3901 ring->desc_dma_addr = 0; 3902 ring->buf_size = q->buf_size; 3903 ring->desc_num = desc_num; 3904 ring->next_to_use = 0; 3905 ring->next_to_clean = 0; 3906 ring->last_to_use = 0; 3907 } 3908 3909 static void hns3_queue_to_ring(struct hnae3_queue *tqp, 3910 struct hns3_nic_priv *priv) 3911 { 3912 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 3913 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 3914 } 3915 3916 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 3917 { 3918 struct hnae3_handle *h = priv->ae_handle; 3919 struct pci_dev *pdev = h->pdev; 3920 int i; 3921 3922 priv->ring = devm_kzalloc(&pdev->dev, 3923 array3_size(h->kinfo.num_tqps, 3924 
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			      unsigned int ring_type)
{
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hns3_enet_ring *ring;
	int desc_num;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring = &priv->ring[q->tqp_index];
		desc_num = priv->ae_handle->kinfo.num_tx_desc;
		ring->queue_index = q->tqp_index;
	} else {
		ring = &priv->ring[q->tqp_index + queue_num];
		desc_num = priv->ae_handle->kinfo.num_rx_desc;
		ring->queue_index = q->tqp_index;
	}

	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
	ring->last_to_use = 0;
}

static void hns3_queue_to_ring(struct hnae3_queue *tqp,
			       struct hns3_nic_priv *priv)
{
	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i;

	priv->ring = devm_kzalloc(&pdev->dev,
				  array3_size(h->kinfo.num_tqps,
					      sizeof(*priv->ring), 2),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_queue_to_ring(h->kinfo.tqp[i], priv);

	return 0;
}

static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	if (!priv->ring)
		return;

	devm_kfree(priv->dev, priv->ring);
	priv->ring = NULL;
}

static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
				     sizeof(ring->desc_cb[0]), GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	devm_kfree(ring_to_dev(ring), ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	devm_kfree(ring_to_dev(ring), ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->last_to_use = 0;
	ring->pending_buf = 0;
	if (ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
	}
}

static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

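/* Program a ring's base address, length and buffer size into its TQP
 * registers. The 64-bit descriptor DMA address is split across the _L/_H
 * registers, with the high half written as (dma >> 31) >> 1 so the shift
 * stays well-defined even when dma_addr_t is only 32 bits wide. The BD
 * number registers take desc_num / 8 - 1 rather than the raw descriptor
 * count, and the RX buffer size is written as the encoded type returned by
 * hns3_buf_size2type(), e.g. HNS3_BD_SIZE_2048_TYPE for 2048 byte buffers.
 */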
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);

	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}

static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	int i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		int j;

		if (!test_bit(i, &tc_info->tc_en))
			continue;

		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q;

			q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
		}
	}
}

int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(&priv->ring[i]);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		u64_stats_init(&priv->ring[i].syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(&priv->ring[j]);

	return -ENOMEM;
}

int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		hns3_fini_ring(&priv->ring[i]);
		hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
	}
	return 0;
}

/* Set mac addr if it is configured, or leave it to the AE driver */
static int hns3_init_mac_addr(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];
	int ret = 0;

	if (h->ae_algo->ops->get_mac_addr)
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(mac_addr_temp)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
		ether_addr_copy(netdev->perm_addr, mac_addr_temp);
	} else {
		return 0;
	}

	if (h->ae_algo->ops->set_mac_addr)
		ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);

	return ret;
}

static int hns3_init_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = 0;

	if (h->ae_algo->ops->mac_connect_phy)
		ret = h->ae_algo->ops->mac_connect_phy(h);

	return ret;
}

static void hns3_uninit_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->mac_disconnect_phy)
		h->ae_algo->ops->mac_disconnect_phy(h);
}

static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->del_all_fd_entries)
		h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}

static int hns3_client_start(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_start)
		return 0;

	return handle->ae_algo->ops->client_start(handle);
}

static void hns3_client_stop(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_stop)
		return;

	handle->ae_algo->ops->client_stop(handle);
}

static void hns3_info_show(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;

	dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
	dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
	dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
	dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
	dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
	dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
	dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
	dev_info(priv->dev, "Total number of enabled TCs: %u\n",
		 kinfo->tc_info.num_tc);
	dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
}

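/* hns3_client_init() is hooked up below as client_ops.init_instance, so the
 * hnae3 core calls it once for each handle bound to this client. Setup runs
 * in the order: netdev allocation, ring config, vector alloc/init, ring
 * memory, PHY connect, register_netdev(), optional aRFS cpu rmap, IRQ setup
 * and finally client_start; the error labels unwind these steps in reverse.
 */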
static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	u16 alloc_tqps, max_rss_size;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
						    &max_rss_size);
	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->tx_timeout_count = 0;
	priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_alloc_vector_data;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring;
	}

	ret = hns3_init_phy(netdev);
	if (ret)
		goto out_init_phy;

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	/* the device can work without cpu rmap, only aRFS needs it */
	ret = hns3_set_rx_cpu_rmap(netdev);
	if (ret)
		dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);

	ret = hns3_nic_init_irq(priv);
	if (ret) {
		dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
		hns3_free_rx_cpu_rmap(netdev);
		goto out_init_irq_fail;
	}

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto out_client_start;
	}

	hns3_dcbnl_setup(handle);

	hns3_dbg_init(handle);

	/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
	netdev->max_mtu = HNS3_MAX_MTU;

	if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
		set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);

	if (netif_msg_drv(handle))
		hns3_info_show(priv);

	return ret;

out_client_start:
	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
out_init_irq_fail:
	unregister_netdev(netdev);
out_reg_netdev_fail:
	hns3_uninit_phy(netdev);
out_init_phy:
	hns3_uninit_all_ring(priv);
out_init_ring:
	hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
	priv->ring = NULL;
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}

static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	hns3_client_stop(handle);

	hns3_uninit_phy(netdev);

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		goto out_netdev_free;
	}

	hns3_free_rx_cpu_rmap(netdev);

	hns3_nic_uninit_irq(priv);

	hns3_del_all_fd_rules(netdev, true);

	hns3_clear_all_ring(handle, true);

	hns3_nic_uninit_vector_data(priv);

	hns3_nic_dealloc_vector_data(priv);

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

out_netdev_free:
	hns3_dbg_uninit(handle);
	free_netdev(netdev);
}

static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		if (netif_msg_link(handle))
			netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		if (netif_msg_link(handle))
			netdev_info(netdev, "link down\n");
	}
}

static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!ndev)
		return -ENODEV;

	return hns3_nic_set_real_num_queue(ndev);
}

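/* The clear helpers below drop whatever is still queued in a ring's software
 * state. hns3_clear_rx_ring() re-allocates and re-maps a buffer for every
 * descriptor whose buffer was handed to the stack, so the ring can be reused
 * as-is, while hns3_force_clear_rx_ring() only unmaps such buffers and is
 * used on the teardown/reset path (hns3_clear_all_ring() with force == true)
 * where the ring is about to be reinitialized or freed anyway.
 */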
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_clean != ring->next_to_use) {
		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
		hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
		ring_ptr_move_fw(ring, next_to_clean);
	}

	ring->pending_buf = 0;
}

static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	struct hns3_desc_cb res_cbs;
	int ret;

	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by the
		 * stack, so we need to replace the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);
				/* If allocating a new buffer fails, exit
				 * directly; the ring will be cleared again
				 * in the up flow.
				 */
				netdev_warn(ring_to_netdev(ring),
					    "reserve buffer map failed, ret = %d\n",
					    ret);
				return ret;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}
		ring_ptr_move_fw(ring, next_to_use);
	}

	/* Free the pending skb in rx ring */
	if (ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
		ring->pending_buf = 0;
	}

	return 0;
}

static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by the
		 * stack, so only need to unmap the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}

static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct hns3_enet_ring *ring;

		ring = &priv->ring[i];
		hns3_clear_tx_ring(ring);

		ring = &priv->ring[i + h->kinfo.num_tqps];
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		if (force)
			hns3_force_clear_rx_ring(ring);
		else
			hns3_clear_rx_ring(ring);
	}
}

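/* Bring every TQP back to a clean state after a reset: each queue is reset
 * in hardware, the ring registers are re-programmed, leftover TX buffers are
 * dropped, and every RX descriptor gets its buffer re-posted because the
 * hardware head/tail pointers are unknown at this point. The TX ring to TC
 * mapping is restored at the end via hns3_init_tx_ring_tc().
 */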
int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *rx_ring;
	int i, j;
	int ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = h->ae_algo->ops->reset_queue(h, i);
		if (ret)
			return ret;

		hns3_init_ring_hw(&priv->ring[i]);

		/* We need to clear the tx ring here because the self test
		 * will use the ring and will not bring the interface down
		 * before bringing it up.
		 */
		hns3_clear_tx_ring(&priv->ring[i]);
		priv->ring[i].next_to_clean = 0;
		priv->ring[i].next_to_use = 0;
		priv->ring[i].last_to_use = 0;

		rx_ring = &priv->ring[i + h->kinfo.num_tqps];
		hns3_init_ring_hw(rx_ring);
		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
			return ret;

		/* We cannot know the hardware head and tail when this
		 * function is called in the reset flow, so we reuse all
		 * descriptors.
		 */
		for (j = 0; j < rx_ring->desc_num; j++)
			hns3_reuse_buffer(rx_ring, j);

		rx_ring->next_to_clean = 0;
		rx_ring->next_to_use = 0;
	}

	hns3_init_tx_ring_tc(priv);

	return 0;
}

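/* hns3_store_coal()/hns3_restore_coal() bracket the UNINIT/INIT reset
 * notifications so interrupt coalesce settings survive a reset. Since
 * ethtool currently exposes a single coalesce configuration, vector 0's
 * settings are saved and then copied back to every vector on restore.
 */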
static void hns3_store_coal(struct hns3_nic_priv *priv)
{
	/* ethtool only supports setting and querying one coalesce
	 * configuration for now, so save vector 0's coalesce configuration
	 * here in order to restore it.
	 */
	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
}

static void hns3_restore_coal(struct hns3_nic_priv *priv)
{
	u16 vector_num = priv->vector_num;
	int i;

	for (i = 0; i < vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
		       sizeof(struct hns3_enet_coalesce));
	}
}

static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
		return 0;

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
	int ret = 0;

	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_open(kinfo->netdev);
		if (ret) {
			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
			netdev_err(kinfo->netdev,
				   "net up fail, ret=%d!\n", ret);
			return ret;
		}
	}

	return ret;
}

static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_put_ring;

	hns3_restore_coal(priv);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_dealloc_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_uninit_vector;

	/* the device can work without cpu rmap, only aRFS needs it */
	ret = hns3_set_rx_cpu_rmap(netdev);
	if (ret)
		dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);

	ret = hns3_nic_init_irq(priv);
	if (ret) {
		dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
		hns3_free_rx_cpu_rmap(netdev);
		goto err_init_irq_fail;
	}

	if (!hns3_is_phys_func(handle->pdev))
		hns3_init_mac_addr(netdev);

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto err_client_start_fail;
	}

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;

err_client_start_fail:
	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
err_init_irq_fail:
	hns3_uninit_all_ring(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_dealloc_vector:
	hns3_nic_dealloc_vector_data(priv);
err_put_ring:
	hns3_put_ring_config(priv);

	return ret;
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		return 0;
	}

	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
	hns3_clear_all_ring(handle, true);
	hns3_reset_tx_queue(priv->ae_handle);

	hns3_nic_uninit_vector_data(priv);

	hns3_store_coal(priv);

	hns3_nic_dealloc_vector_data(priv);

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	return ret;
}

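/* Reset notifications are delivered through client_ops.reset_notify in four
 * stages; the usual order is DOWN_CLIENT, UNINIT_CLIENT, INIT_CLIENT and
 * finally UP_CLIENT, which is also the sequence hns3_set_channels() below
 * drives by hand when the TQP count changes.
 */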
static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}

static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num,
				bool rxfh_configured)
{
	int ret;

	ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num,
						 rxfh_configured);
	if (ret) {
		dev_err(&handle->pdev->dev,
			"Change tqp num(%u) fail.\n", new_tqp_num);
		return ret;
	}

	ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT);
	if (ret)
		hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT);

	return ret;
}

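/* hns3_set_channels() backs the ethtool set_channels operation (presumably
 * registered through hns3_ethtool_set_ops(), e.g. "ethtool -L <dev>
 * combined <N>"). Only combined channels are accepted, and a failed change
 * is rolled back by retrying with the previous TQP count.
 */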
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	bool rxfh_configured = netif_is_rxfh_configured(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (ch->rx_count || ch->tx_count)
		return -EINVAL;

	if (kinfo->tc_info.mqprio_active) {
		dev_err(&netdev->dev,
			"it's not allowed to set channels via ethtool when MQPRIO mode is on\n");
		return -EINVAL;
	}

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < 1) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from 1 to %u",
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	if (kinfo->rss_size == new_tqp_num)
		return 0;

	netif_dbg(h, drv, netdev,
		  "set channels: tqp_num=%u, rxfh=%d\n",
		  new_tqp_num, rxfh_configured);

	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	org_tqp_num = h->kinfo.num_tqps;
	ret = hns3_change_channels(h, new_tqp_num, rxfh_configured);
	if (ret) {
		int ret1;

		netdev_warn(netdev,
			    "Change channels fail, revert to old value\n");
		ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured);
		if (ret1) {
			netdev_err(netdev,
				   "revert to old channel fail\n");
			return ret1;
		}

		return ret;
	}

	return 0;
}

static const struct hns3_hw_error_info hns3_hw_err[] = {
	{ .type = HNAE3_PPU_POISON_ERROR,
	  .msg = "PPU poison" },
	{ .type = HNAE3_CMDQ_ECC_ERROR,
	  .msg = "IMP CMDQ error" },
	{ .type = HNAE3_IMP_RD_POISON_ERROR,
	  .msg = "IMP RD poison" },
	{ .type = HNAE3_ROCEE_AXI_RESP_ERROR,
	  .msg = "ROCEE AXI RESP error" },
};

static void hns3_process_hw_error(struct hnae3_handle *handle,
				  enum hnae3_hw_error_type type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
		if (hns3_hw_err[i].type == type) {
			dev_err(&handle->pdev->dev, "Detected %s!\n",
				hns3_hw_err[i].msg);
			break;
		}
	}
}

static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
	.process_hw_error = hns3_process_hw_error,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. It registers the KNIC client with the hnae3 framework and
 * then registers the driver with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	hns3_dbg_register_debugfs(hns3_driver_name);

	ret = hnae3_register_client(&client);
	if (ret)
		goto err_reg_client;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		goto err_reg_driver;

	return ret;

err_reg_driver:
	hnae3_unregister_client(&client);
err_reg_client:
	hns3_dbg_unregister_debugfs();
	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
	hns3_dbg_unregister_debugfs();
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");