/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))

static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management pkts */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
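			/* with TSE set for a GSO skb, mss and paylen (frame
			 * length minus the L2..L4 headers) describe the
			 * segmentation job handed to the hardware
			 */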
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}

static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);

static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has no l3 checksum, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}

static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

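		/* skb_copy() linearizes the skb, collapsing all fragments
		 * into one buffer so the packet fits in a single BD
		 */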
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;

	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size exceeds what one BD can carry, split the frag */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc(ring, priv,
			     (k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
			     dma + BD_MAX_SEND_SIZE * k,
			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
			     buf_num,
			     (type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
			     mtu);
}

int hns_nic_net_xmit_hw(struct net_device *ndev,
			struct sk_buff *skb,
			struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct hnae_ring *ring = ring_data->ring;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
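	/* the head BD keeps the skb pointer (DESC_TYPE_SKB) so TX reclaim
	 * can count and free the packet; fragment BDs only carry pages
	 */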
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* all descriptors are filled; notify the stack and the hardware */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}

/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @flag: the flag field of the Rx descriptor
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
					unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* initialize network frame pointer */
	network = data;

	/* set first protocol and move network header forward */
	network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
		== HNS_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* relocate pointer to start of L4 header */
	network += hlen;

	/* finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}

static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize - pull_len);

	/* avoid re-using pages from a remote NUMA node; the reuse flag
	 * defaults to no reuse
	 */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}

static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}

static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

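		/* a packet short enough to be copied out whole must occupy
		 * exactly one BD, anything else is a descriptor error
		 */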
		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* exception path: free the skb and skip past the bad descriptors */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
		ring->stats.l3l4_csum_err++;
		return 0;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	return 0;
}

static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleaned_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleaned_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* ensure all writes are visible before ringing the doorbell */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

/* hand the received packet up to the network stack */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
	ndev->last_rx = jiffies;
}

static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

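	/* count the BDs between next_to_use and next_to_clean that the
	 * driver still owns, adding desc_num when the clean index wrapped
	 */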
	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* skip the error BDs */
			recv_pkts++;
			continue;
		}

		/* hand the packet up to the IP stack */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
			ring_data, skb);
		recv_pkts++;
	}

out:
	/* refill whatever buffers were consumed */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}

static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num = 0;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	/* workaround for a hardware bug */
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num = 0;

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num == 0)
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring, 0);
	else
		napi_schedule(&ring_data->napi);
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before calling this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#endif
/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touching any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ndev);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ndev);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
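		 * This pairs with the smp_mb() issued after
		 * netif_stop_subqueue() in hns_nic_net_xmit_hw().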
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head == ring->next_to_clean)
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring, 0);
	else
		napi_schedule(&ring_data->napi);
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = ring->next_to_use; /* ntu: soft-set ring position */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	int clean_complete = ring_data->poll_one(
		ring_data, budget, ring_data->ex_process);

	if (clean_complete >= 0 && clean_complete < budget) {
		napi_complete(napi);
		ring_data->fini_process(ring_data);
		return 0;
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 * hns_nic_adjust_link - adjust link state according to the PHY status
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	if (ndev->phydev) {
		h->dev->ops->adjust_link(h, ndev->phydev->speed,
					 ndev->phydev->duplex);
		state = ndev->phydev->link;
	}
	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}

/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

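	/* XGMII (10G) links are not managed by the PHY state machine here:
	 * attach without an adjust_link callback and, below, force
	 * autonegotiation off
	 */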
	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return -ENODEV;

	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}

static void hns_set_irq_affinity(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int cpu;
	cpumask_t mask;

	/* different irq balancing for 16-core and 32-core systems */
	if (h->q_num == num_possible_cpus()) {
		for (i = 0; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}
	} else {
		for (i = 0; i < h->q_num; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index * 2;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}

		for (i = h->q_num; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2 + 1)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index * 2 + 1;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}
	}
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (i < h->q_num ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			return ret;
		}
		disable_irq(rd->ring->irq);
		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	/* set cpu affinity */
	hns_set_irq_affinity(priv);

	return 0;
}

static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
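
	/* full down/reset/up cycle, serialized by NIC_STATE_REINITING */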
	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);
	hns_nic_net_reset(netdev);
	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	hns_tx_timeout_reset(priv);
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct phy_device *phy_dev = netdev->phydev;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
		napi_schedule(&priv->ring_data[i].napi);
	local_irq_restore(flags);
}
#endif

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int ret;

	assert(skb->queue_mapping < ndev->ae_handle->q_num);
	ret = hns_nic_net_xmit_hw(ndev, skb,
				  &tx_ring_data(priv, skb->queue_mapping));
	if (ret == NETDEV_TX_OK) {
		netif_trans_update(ndev);
		ndev->stats.tx_bytes += skb->len;
		ndev->stats.tx_packets++;
	}
	return (netdev_tx_t)ret;
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (new_mtu < 68)
		return -EINVAL;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (netif_running(ndev)) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);

		ret = h->dev->ops->set_mtu(h, new_mtu);
		if (ret)
			netdev_err(ndev, "set mtu fail, return value %d\n",
				   ret);

		if (hns_nic_net_open(ndev))
			netdev_err(ndev, "hns net open fail\n");
	} else {
		ret = h->dev->ops->set_mtu(h, new_mtu);
	}

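	/* commit the new MTU only if the hardware accepted it */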
	if (!ret)
		ndev->mtu = new_mtu;

	return ret;
}

static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 does not support tso!\n");
		break;
	default:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* The chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		break;
	}
	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
		struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}
	return features;
}

/**
 * hns_set_multicast_list - set the multicast mac address list
 * @ndev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}

void hns_nic_set_rx_mode(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->set_promisc_mode) {
		if (ndev->flags & IFF_PROMISC)
			h->dev->ops->set_promisc_mode(h, 1);
		else
			h->dev->ops->set_promisc_mode(h, 0);
	}

	hns_set_multicast_list(ndev);
}

struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
					      struct rtnl_link_stats64 *stats)
{
	int idx = 0;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;

	return stats;
}

static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
		     void *accel_priv, select_queue_fallback_t fallback)
{
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	/* fix hardware broadcast/multicast packets queue loopback */
	if (!AE_IS_VER1(priv->enet_ver) &&
	    is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;
	else
		return fallback(ndev, skb);
}

static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = hns_nic_poll_controller,
#endif
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
	.ndo_select_queue = hns_nic_select_queue,
};

static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	struct hnae_handle *h = priv->ae_handle;

	if (h->phy_dev) {
		if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
			return;

		(void)genphy_read_status(h->phy_dev);
	}
	hns_nic_adjust_link(netdev);
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}

/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

	rtnl_lock();
	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(priv->netdev);

	if (type == HNAE_PORT_DEBUG) {
		hns_nic_net_reinit(priv->netdev);
	} else {
		netif_carrier_off(priv->netdev);
		netif_tx_disable(priv->netdev);
	}
	rtnl_unlock();
}

/* for doing service complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));

	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}

static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);

	hns_nic_reset_subtask(priv);
	hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}

static void hns_nic_service_timer(unsigned long data)
{
	struct hns_nic_priv *priv = (struct hns_nic_priv *)data;

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}

/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}

static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	bool is_ver1 = AE_IS_VER1(priv->enet_ver);
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

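	/* ring_data[0..q_num-1] hold the TX rings,
	 * ring_data[q_num..2*q_num-1] the RX rings
	 */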
	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
			hns_nic_tx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
			hns_nic_rx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}

static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}

static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		if ((netdev->features & NETIF_F_TSO) ||
		    (netdev->features & NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* This chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		/* enable tso when init
		 * control tso on/off through TSE bit in bd
		 */
		h->dev->ops->set_tso_stats(h, 1);
	}
}

static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->fwnode, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = -ENODEV;
		dev_dbg(priv->dev, "has no handle, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);
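
	/* the AE engine we waited for has registered: try the bind again
	 * and drop the notifier once it succeeds
	 */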
	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}

static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	u32 port_id;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (dev_of_node(dev)) {
		struct device_node *ae_node;

		if (of_device_is_compatible(dev->of_node,
					    "hisilicon,hns-nic-v1"))
			priv->enet_ver = AE_VERSION_1;
		else
			priv->enet_ver = AE_VERSION_2;

		ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
		if (IS_ERR_OR_NULL(ae_node)) {
			ret = PTR_ERR(ae_node);
			dev_err(dev, "cannot find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = &ae_node->fwnode;
	} else if (is_acpi_node(dev->fwnode)) {
		struct acpi_reference_args args;

		if (acpi_dev_found(hns_enet_acpi_match[0].id))
			priv->enet_ver = AE_VERSION_1;
		else if (acpi_dev_found(hns_enet_acpi_match[1].id))
			priv->enet_ver = AE_VERSION_2;
		else
			return -ENXIO;

		/* try to find port-idx-in-ae first */
		ret = acpi_node_get_property_reference(dev->fwnode,
						       "ae-handle", 0, &args);
		if (ret) {
			dev_err(dev, "cannot find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = acpi_fwnode_handle(args.adev);
	} else {
		dev_err(dev, "cannot read cfg data from OF or acpi\n");
		return -ENXIO;
	}

	ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
	if (ret) {
		/* only for old code compatibility */
		ret = device_property_read_u32(dev, "port-id", &port_id);
		if (ret)
			goto out_read_prop_fail;
		/* for old dts, we need to calculate the port offset */
		port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET :
			port_id - HNS_SRV_OFFSET;
	}
	priv->port_id = port_id;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		break;
	default:
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 64bit fail!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	setup_timer(&priv->service_timer, hns_nic_service_timer,
		    (unsigned long)priv);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "register notifier fail!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "has no handle, register notifier!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
	free_netdev(ndev);
	return ret;
}

static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	free_netdev(ndev);
	return 0;
}

static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
		.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
Inc."); 2094 MODULE_LICENSE("GPL"); 2095 MODULE_ALIAS("platform:hns-nic"); 2096