/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))

static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management pkts */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
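			/* descriptive note: paylen excludes the L2..L4
			 * headers (SKB_TMP_LEN); for TSO the hardware
			 * prepends a copy of them to every segment
			 */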
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}

static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);

static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has no l3 checksum, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}
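/* Illustrative note: an skb whose nr_frags + 1 exceeds
 * ring->max_desc_num_per_pkt is linearized below via skb_copy(), so it
 * can be sent with a single descriptor.
 */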
static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;

	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than the hardware limit, split it */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc(ring, priv,
			     (k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
			     dma + BD_MAX_SEND_SIZE * k,
			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
			     buf_num,
			     (type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
			     mtu);
}
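/* Worked example (illustrative): a 20000-byte buffer splits above into
 * frag_buf_num = DIV_ROUND_UP(20000, 8191) = 3 descriptors of 8191,
 * 8191 and 3618 bytes (sizeoflast = 20000 % 8191 = 3618).
 */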
int hns_nic_net_xmit_hw(struct net_device *ndev,
			struct sk_buff *skb,
			struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct hnae_ring *ring = ring_data->ring;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* all descriptors for this packet are filled; account it */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}
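/* Example of what hns_nic_get_headlen() below returns (illustrative):
 * for an untagged IPv4/TCP frame without IP or TCP options it pulls
 * ETH_HLEN + 20 + 20 = 54 bytes of header.
 */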
/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @flag: RX descriptor flags, used to pick the L3/L4 protocol
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
					unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* initialize network frame pointer */
	network = data;

	/* set first protocol and move network header forward */
	network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
		== HNS_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) >
		    (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* relocate pointer to start of L4 header */
	network += hlen;

	/* finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}
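/* Buffer recycling in hns_nic_reuse_page() below (descriptive): with
 * 4K pages and 2K buffers a page holds two buffers and page_offset is
 * XOR-flipped between the halves; with larger pages the offset walks
 * forward until last_offset is reached.
 */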
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize - pull_len);

	/* avoid reusing pages from a remote node; reuse_flag defaults to 0 */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}

static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}
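/* Descriptive note: v2 hardware appears to encode BUFNUM as
 * (descriptors - 1), hence the "+ 1" in get_v2rx_desc_bnum() above,
 * while v1 hardware reports the descriptor count directly.
 */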
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	struct ethhdr *eh;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* exception handling: free the skb and skip the descriptors */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	/* filter out multicast pkt with the same src mac as this port */
	eh = eth_hdr(skb);
	if (unlikely(is_multicast_ether_addr(eh->h_dest) &&
		     ether_addr_equal(ndev->dev_addr, eh->h_source))) {
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
		ring->stats.l3l4_csum_err++;
		return 0;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	return 0;
}

static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all buffers are written before the doorbell */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}
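/* Descriptive note: the writel_relaxed() above is the refill doorbell;
 * it publishes i newly usable descriptors via the RCB head register,
 * ordered after the descriptor writes by the preceding wmb().
 */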
/* pass the received packet up to the network stack */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
	ndev->last_rx = jiffies;
}

static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum, ex_num;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
recv:
	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data, clean_count);
			clean_count = 0;
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* skip this erroneous packet */
			recv_pkts++;
			continue;
		}

		/* hand the packet to the IP stack */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
			ring_data, skb);
		recv_pkts++;
	}

	/* poll again if more descriptors arrived while budget remains */
	if (recv_pkts < budget) {
		ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

		if (ex_num > clean_count) {
			num += ex_num - clean_count;
			rmb(); /* complete reading the rx ring bd number */
			goto recv;
		}
	}

out:
	/* refill any buffers consumed in this poll */
	if (clean_count > 0)
		hns_nic_alloc_rx_buffers(ring_data, clean_count);

	return recv_pkts;
}

static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num = 0;

	/* workaround for a hardware bug: recheck the ring for pending BDs */
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before calling this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
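/* Illustrative check: with desc_num = 1024, next_to_clean c = 1000 and
 * next_to_use u = 10 (wrapped), a reported head h of 1020 or 5 is
 * valid (h > c || h <= u), while h = 500 is not.
 */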
/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#endif
/* reclaim all descriptors in one budget
 * return 0 on success, or a negative error code
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ndev);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ndev);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}
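/* Descriptive note: netdev_tx_completed_queue() above reports to
 * byte-queue limits (BQL) and pairs with the netdev_tx_sent_queue()
 * call on the transmit path in hns_nic_net_xmit_hw().
 */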
static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = ring->next_to_use; /* ntu: ring position last set by software */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	int clean_complete = ring_data->poll_one(
		ring_data, budget, ring_data->ex_process);

	if (clean_complete >= 0 && clean_complete < budget) {
		napi_complete(napi);
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 0);
		if (ring_data->fini_process)
			ring_data->fini_process(ring_data);
		return 0;
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 * hns_nic_adjust_link - adjust the link state by PHY status or new params
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	if (priv->phy) {
		h->dev->ops->adjust_link(h, ndev->phydev->speed,
					 ndev->phydev->duplex);
		state = priv->phy->link;
	}
	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}

/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return -ENODEV;

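	/* advertise only the link modes supported by both PHY and MAC */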
	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	priv->phy = phy_dev;

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}

static void hns_set_irq_affinity(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int cpu;
	cpumask_t mask;

	/* different irq balance for 16-core and 32-core systems */
	if (h->q_num == num_possible_cpus()) {
		for (i = 0; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}
	} else {
		for (i = 0; i < h->q_num; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index * 2;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}

		for (i = h->q_num; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2 + 1)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index * 2 + 1;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}
	}
}
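/* Descriptive note: when q_num equals the possible-CPU count, tx ring
 * i and rx ring i above are both pinned to CPU i; otherwise tx rings
 * take even CPUs and rx rings the odd ones.
 */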
static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (i < h->q_num ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			return ret;
		}
		disable_irq(rd->ring->irq);
		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	/* set cpu affinity */
	hns_set_irq_affinity(priv);

	return 0;
}

static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (priv->phy)
		phy_start(priv->phy);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (priv->phy)
		phy_stop(priv->phy);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}
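/* Descriptive note: hns_nic_net_reset() above only reinitializes the
 * AE handle; hns_nic_net_reinit() below additionally brings the
 * interface down and back up around the reset.
 */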
void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);
	hns_nic_net_reset(netdev);
	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	hns_tx_timeout_reset(priv);
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct phy_device *phy_dev = priv->phy;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
		napi_schedule(&priv->ring_data[i].napi);
	local_irq_restore(flags);
}
#endif

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int ret;

	assert(skb->queue_mapping < priv->ae_handle->q_num);
	ret = hns_nic_net_xmit_hw(ndev, skb,
				  &tx_ring_data(priv, skb->queue_mapping));
	if (ret == NETDEV_TX_OK) {
		netif_trans_update(ndev);
		ndev->stats.tx_bytes += skb->len;
		ndev->stats.tx_packets++;
	}
	return (netdev_tx_t)ret;
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (new_mtu < 68)
		return -EINVAL;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (netif_running(ndev)) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);

		ret = h->dev->ops->set_mtu(h, new_mtu);
		if (ret)
			netdev_err(ndev, "set mtu fail, return value %d\n",
				   ret);

		if (hns_nic_net_open(ndev))
			netdev_err(ndev, "hns net open fail\n");
	} else {
		ret = h->dev->ops->set_mtu(h, new_mtu);
	}

	if (!ret)
		ndev->mtu = new_mtu;

	return ret;
}
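/* Descriptive note: toggling TSO in hns_nic_set_features() below swaps
 * the whole TX descriptor path (fill_desc/maybe_stop_tx); segmentation
 * itself is then requested per packet via the TSE bit in each BD.
 */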
static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 does not support tso!\n");
		break;
	default:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* The chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		break;
	}
	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
		struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}
	return features;
}

/**
 * hns_set_multicast_list - set multicast mac addresses
 * @ndev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}

void hns_nic_set_rx_mode(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->set_promisc_mode) {
		if (ndev->flags & IFF_PROMISC)
			h->dev->ops->set_promisc_mode(h, 1);
		else
			h->dev->ops->set_promisc_mode(h, 0);
	}

	hns_set_multicast_list(ndev);
}
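/* Descriptive note: hns_nic_get_stats64() below sums the per-queue
 * tx/rx byte and packet counters kept by the rings, and falls back to
 * ndev->stats for the error counters.
 */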
struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
					      struct rtnl_link_stats64 *stats)
{
	int idx = 0;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;

	return stats;
}

static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = hns_nic_poll_controller,
#endif
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
};

static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	struct hnae_handle *h = priv->ae_handle;

	if (h->phy_dev) {
		if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
			return;

		(void)genphy_read_status(h->phy_dev);
	}
	hns_nic_adjust_link(netdev);
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}

/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

	rtnl_lock();
	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(priv->netdev);

	if (type == HNAE_PORT_DEBUG) {
		hns_nic_net_reinit(priv->netdev);
	} else {
		netif_carrier_off(priv->netdev);
		netif_tx_disable(priv->netdev);
	}
	rtnl_unlock();
}

/* mark the service task as complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));

	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}

static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);

	hns_nic_reset_subtask(priv);
	hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}

static void hns_nic_service_timer(unsigned long data)
{
	struct hns_nic_priv *priv = (struct hns_nic_priv *)data;

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}

/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	bool is_ver1 = AE_IS_VER1(priv->enet_ver);
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}

static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}

static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		if ((netdev->features & NETIF_F_TSO) ||
		    (netdev->features & NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* This chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		/* enable tso when initializing;
		 * control tso on/off through the TSE bit in the bd
		 */
		h->dev->ops->set_tso_stats(h, 1);
	}
}

static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->fwnode, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = -ENODEV;
		dev_dbg(priv->dev, "no hnae handle yet, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}
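/* Descriptive note: when the AE handle is not ready at probe time,
 * hns_nic_dev_probe() below registers the notifier and initialization
 * finishes in hns_nic_notifier_action() once HNAE_AE_REGISTER fires.
 */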
static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	u32 port_id;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv),
				 NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (dev_of_node(dev)) {
		struct device_node *ae_node;

		if (of_device_is_compatible(dev->of_node,
					    "hisilicon,hns-nic-v1"))
			priv->enet_ver = AE_VERSION_1;
		else
			priv->enet_ver = AE_VERSION_2;

		ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
		if (IS_ERR_OR_NULL(ae_node)) {
			ret = PTR_ERR(ae_node);
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = &ae_node->fwnode;
	} else if (is_acpi_node(dev->fwnode)) {
		struct acpi_reference_args args;

		if (acpi_dev_found(hns_enet_acpi_match[0].id))
			priv->enet_ver = AE_VERSION_1;
		else if (acpi_dev_found(hns_enet_acpi_match[1].id))
			priv->enet_ver = AE_VERSION_2;
		else
			return -ENXIO;

		/* try to find port-idx-in-ae first */
		ret = acpi_node_get_property_reference(dev->fwnode,
						       "ae-handle", 0, &args);
		if (ret) {
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = acpi_fwnode_handle(args.adev);
	} else {
		dev_err(dev, "cannot read cfg data from OF or acpi\n");
		return -ENXIO;
	}

	ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
	if (ret) {
		/* only for old code compatibility */
		ret = device_property_read_u32(dev, "port-id", &port_id);
		if (ret)
			goto out_read_prop_fail;
		/* for old dts, we need to calculate the port offset */
		port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
			: port_id - HNS_SRV_OFFSET;
	}
	priv->port_id = port_id;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		break;
	default:
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 64bit fail!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	setup_timer(&priv->service_timer, hns_nic_service_timer,
		    (unsigned long)priv);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "register notifier fail!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "no hnae handle yet, register notifier!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
	free_netdev(ndev);
	return ret;
}

static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (priv->phy)
		phy_disconnect(priv->phy);
	priv->phy = NULL;

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	free_netdev(ndev);
	return 0;
}

static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
		.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");