/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))

static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	int skb_tmp_len;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					skb_tmp_len = SKB_TMP_LEN(skb);
					l4_len = tcp_hdrlen(skb);
					mss = mtu - skb_tmp_len - ETH_FCS_LEN;
					paylen = skb->len - skb_tmp_len;
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					skb_tmp_len = SKB_TMP_LEN(skb);
					l4_len = tcp_hdrlen(skb);
					mss = mtu - skb_tmp_len - ETH_FCS_LEN;
					paylen = skb->len - skb_tmp_len;
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}

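/* fill_desc - fill a V1 hardware TX buffer descriptor.
 *
 * @priv is the skb for the head BD or the page for a fragment BD; @size,
 * @dma and @frag_end describe the buffer, @buf_num is the total BD count
 * of the packet.  V1 hardware only needs the checksum-offload flags and
 * the IP header offset, so this is a slimmed-down variant of
 * fill_v2_desc() (the mtu argument is unused here).
 */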
static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* IPv6 has no L3 checksum, only check the L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}

static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

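/* hns_nic_maybe_stop_tso - check ring space for a (possibly TSO) packet.
 *
 * Each BD can carry at most BD_MAX_SEND_SIZE bytes, so the head and every
 * fragment may need several BDs.  If the total exceeds the per-packet BD
 * limit, the skb is copied (linearized) and the count is recomputed from
 * skb->len.  Returns 0 on success, -EBUSY if the ring is full, -ENOMEM on
 * copy failure; *bnum returns the BD count.
 */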
static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;

	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

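/* fill_tso_desc - fill one or more TX BDs for a single buffer.
 *
 * Splits a buffer that is larger than BD_MAX_SEND_SIZE across consecutive
 * BDs; for example, a 20000-byte fragment becomes three BDs of 8191, 8191
 * and 3618 bytes.  Only the last BD of the last buffer carries frag_end,
 * and only the very first BD of an skb keeps DESC_TYPE_SKB so the skb is
 * accounted once at TX cleanup.
 */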
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than the hardware limit, split it */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc(ring, priv,
			     (k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
			     dma + BD_MAX_SEND_SIZE * k,
			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
			     buf_num,
			     (type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
			     mtu);
}

int hns_nic_net_xmit_hw(struct net_device *ndev,
			struct sk_buff *skb,
			struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct hnae_ring *ring = ring_data->ring;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* finished translating all packets */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}

/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @max: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
					unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* initialize network frame pointer */
	network = data;

	/* set first protocol and move network header forward */
	network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
		== HNS_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* relocate pointer to start of L4 header */
	network += hlen;

	/* finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}

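/* hns_nic_reuse_page - attach an RX buffer to the skb as a page fragment
 * and decide whether the page can be recycled into the ring.
 *
 * With 2048-byte buffers on 4K pages each page holds two buffers, so the
 * page offset is simply flipped to the other half when the driver is the
 * only page owner.  Otherwise the offset is advanced by the aligned
 * buffer size until the page is exhausted.  Pages from a remote NUMA node
 * are never reused.
 */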
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		   hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize - pull_len);

	/* avoid re-using remote pages, flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}

static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}

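/* hns_nic_poll_rx_skb - build an skb from the next received packet.
 *
 * Short frames (up to HNS_RX_HEAD_SIZE) are copied straight into the skb
 * so the RX buffer can be recycled immediately.  Larger frames copy only
 * the headers and attach the remaining buffers as page fragments.
 * Returns 0 on success or a negative error if the BD chain is malformed,
 * not valid, or fails the hardware L2/length checks; *out_bnum reports
 * the number of BDs consumed.
 */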
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* exception handling: free the skb and skip the descriptors */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
		ring->stats.l3l4_csum_err++;
		return 0;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	return 0;
}

static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all data is written before submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

/* return error number for error or number of desc left to take
 */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
	ndev->last_rx = jiffies;
}

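/* hns_nic_rx_poll_one - NAPI RX poll for one ring.
 *
 * Consumes up to @budget packets from the ring, handing each one to the
 * @v callback (hns_nic_rx_up_pro) and re-filling RX buffers in batches of
 * RCB_NOF_ALLOC_RX_BUFF_ONCE.  Before returning under budget it re-reads
 * the ring's BD counter and keeps polling if more frames have arrived.
 * Returns the number of packets received.
 */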
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum, ex_num;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
recv:
	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data, clean_count);
			clean_count = 0;
		}

		/* poll one packet */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			break;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* skip the erroneous packet */
			recv_pkts++;
			continue;
		}

		/* hand the packet up to the IP stack */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
			ring_data, skb);
		recv_pkts++;
	}

	/* under budget: check whether new BDs arrived in the meantime */
	if (recv_pkts < budget) {
		ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

		if (ex_num > clean_count) {
			num += ex_num - clean_count;
			rmb(); /* complete reading the rx ring bd number */
			goto recv;
		}
	}

	/* return any leftover buffers to the ring */
	if (clean_count > 0)
		hns_nic_alloc_rx_buffers(ring_data, clean_count);

	return recv_pkts;
}

static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num = 0;

	/* workaround for hardware bug: re-check for missed frames */
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

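/* is_valid_clean_head - sanity-check the head index reported by hardware.
 *
 * The ring is circular, so a head value is valid only if it lies between
 * next_to_clean and next_to_use, taking wrap-around into account.  For
 * example, with next_to_clean = 60, next_to_use = 10 and desc_num = 64,
 * any head in 61..63 or 0..10 is accepted.
 */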
static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before calling this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#endif
/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ndev);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ndev);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

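/* hns_nic_tx_fini_pro - final check after a TX NAPI poll.
 *
 * Re-reads the hardware head pointer once more; if new completions
 * arrived while the poll was finishing, NAPI is rescheduled (with the
 * ring interrupt kept masked) so they are not left unreclaimed.
 */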
static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = ring->next_to_clean;

	/* workaround for hardware bug: re-read the real head */
	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = ring->next_to_use; /* ntu: soft-set ring position */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	int clean_complete = ring_data->poll_one(
		ring_data, budget, ring_data->ex_process);

	if (clean_complete >= 0 && clean_complete < budget) {
		napi_complete(napi);
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 0);

		ring_data->fini_process(ring_data);
		return 0;
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 * hns_nic_adjust_link - adjust the link mode according to the phy status
 * or new parameters
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
}

/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;

	if (!h->phy_node)
		return 0;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
		phy_dev = of_phy_connect(ndev, h->phy_node,
					 hns_nic_adjust_link, 0, h->phy_if);
	else
		phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);

	if (unlikely(!phy_dev) || IS_ERR(phy_dev))
		return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);

	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	priv->phy = phy_dev;

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set mac addr if it is configured, otherwise leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct device_node *node = priv->dev->of_node;
	const void *mac_addr_temp;

	mac_addr_temp = of_get_mac_address(node);
	if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
		memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
	} else {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}

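/* hns_set_irq_affinity - spread ring interrupts across online CPUs.
 *
 * When the number of queues equals the number of possible CPUs, each
 * TX/RX ring irq is pinned to the CPU matching its queue index.
 * Otherwise TX irqs go to the even CPUs and RX irqs to the odd ones,
 * which balances the load differently on 16-core and 32-core systems.
 */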
static void hns_set_irq_affinity(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int cpu;
	cpumask_t mask;

	/* different irq balance for 16core and 32core */
	if (h->q_num == num_possible_cpus()) {
		for (i = 0; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}
	} else {
		for (i = 0; i < h->q_num; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index * 2;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}

		for (i = h->q_num; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2 + 1)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index * 2 + 1;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}
	}
}

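/* hns_nic_init_irq - request one interrupt per TX and per RX ring.
 *
 * Ring names are built as "<netdev>-tx<idx>" / "<netdev>-rx<idx>".  Each
 * irq starts disabled and is enabled by hns_nic_ring_open() when the ring
 * is brought up.  Finally the irqs are given CPU affinity hints.
 */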
static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (i < h->q_num ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			return ret;
		}
		disable_irq(rd->ring->irq);
		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	/* set cpu affinity */
	hns_set_irq_affinity(priv);

	return 0;
}

static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j, k;
	int ret;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	for (k = 0; k < h->q_num; k++)
		h->dev->ops->toggle_queue_status(h->qs[k], 1);

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (priv->phy)
		phy_start(priv->phy);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
	for (k = 0; k < h->q_num; k++)
		h->dev->ops->toggle_queue_status(h->qs[k], 0);
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

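/* hns_nic_net_down - tear down a running interface.
 *
 * Marks the device down, stops the service timer, the TX queues and the
 * PHY, asks the AE to stop, then closes every TX/RX ring and drops any
 * TX buffers still pending on the rings.
 */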
static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (priv->phy)
		phy_stop(priv->phy);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	priv->netdev->trans_start = jiffies;
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);
	hns_nic_net_reset(netdev);
	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	hns_tx_timeout_reset(priv);
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct phy_device *phy_dev = priv->phy;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
		napi_schedule(&priv->ring_data[i].napi);
	local_irq_restore(flags);
}
#endif

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int ret;

	assert(skb->queue_mapping < ndev->ae_handle->q_num);
	ret = hns_nic_net_xmit_hw(ndev, skb,
				  &tx_ring_data(priv, skb->queue_mapping));
	if (ret == NETDEV_TX_OK) {
		ndev->trans_start = jiffies;
		ndev->stats.tx_bytes += skb->len;
		ndev->stats.tx_packets++;
	}
	return (netdev_tx_t)ret;
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (new_mtu < 68)
		return -EINVAL;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (netif_running(ndev)) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);

		ret = h->dev->ops->set_mtu(h, new_mtu);
		if (ret)
			netdev_err(ndev, "set mtu fail, return value %d\n",
				   ret);

		if (hns_nic_net_open(ndev))
			netdev_err(ndev, "hns net open fail\n");
	} else {
		ret = h->dev->ops->set_mtu(h, new_mtu);
	}

	if (!ret)
		ndev->mtu = new_mtu;

	return ret;
}

static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 do not support tso!\n");
		break;
	default:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* The chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
			h->dev->ops->set_tso_stats(h, 1);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
			h->dev->ops->set_tso_stats(h, 0);
		}
		break;
	}
	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
	struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}
	return features;
}

/**
 * hns_set_multicast_list - set multicast mac addresses
 * @ndev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}

void hns_nic_set_rx_mode(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->set_promisc_mode) {
		if (ndev->flags & IFF_PROMISC)
			h->dev->ops->set_promisc_mode(h, 1);
		else
			h->dev->ops->set_promisc_mode(h, 0);
	}

	hns_set_multicast_list(ndev);
}

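/* hns_nic_get_stats64 - aggregate 64-bit statistics.
 *
 * Packet and byte counters are summed from the per-queue ring stats,
 * while the error counters are taken from the netdev stats that the AE
 * driver keeps updated.
 */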
struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
					      struct rtnl_link_stats64 *stats)
{
	int idx = 0;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;

	return stats;
}

static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = hns_nic_poll_controller,
#endif
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
};

static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	if (priv->phy) {
		if (!genphy_update_link(priv->phy))
			state = priv->phy->link;
		else
			state = 0;
	}
	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
			netdev_info(netdev, "link up\n");
		} else {
			netif_carrier_off(netdev);
			netdev_info(netdev, "link down\n");
		}
		priv->link = state;
	}
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}

/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

	rtnl_lock();
	/* put off any impending NetWatchDogTimeout */
	priv->netdev->trans_start = jiffies;

	if (type == HNAE_PORT_DEBUG) {
		hns_nic_net_reinit(priv->netdev);
	} else {
		netif_carrier_off(priv->netdev);
		netif_tx_disable(priv->netdev);
	}
	rtnl_unlock();
}

/* for doing service complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));

	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}

static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);

	hns_nic_reset_subtask(priv);
	hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}

static void hns_nic_service_timer(unsigned long data)
{
	struct hns_nic_priv *priv = (struct hns_nic_priv *)data;

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}

/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}

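/* hns_nic_init_ring_data - set up per-ring NAPI contexts.
 *
 * Allocates 2 * q_num ring_data entries: indexes [0, q_num) drive the TX
 * rings and [q_num, 2 * q_num) the RX rings, each with its own NAPI
 * instance and poll/fini callbacks.
 */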
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too much queue (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = hns_nic_tx_fini_pro;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = hns_nic_rx_fini_pro;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}

static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}

static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		if ((netdev->features & NETIF_F_TSO) ||
		    (netdev->features & NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* This chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
			h->dev->ops->set_tso_stats(h, 1);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
	}
}

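/* hns_nic_try_get_ae - bind the netdev to its acceleration engine.
 *
 * Looks up the AE handle named in the device tree, initializes the PHY
 * and the per-ring data, selects the version-specific descriptor ops and
 * finally registers the net device.  If the AE is not available yet, the
 * caller registers a notifier and retries when the AE appears.
 */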
static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->ae_name, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = PTR_ERR(h);
		dev_dbg(priv->dev, "has not handle, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}

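/* hns_nic_dev_probe - platform driver probe.
 *
 * Allocates the multi-queue netdev, reads "ae-name" and "port-id" from
 * the device tree, sets up features, the service timer and work item,
 * and then tries to bind to the AE; if the AE has not registered yet, a
 * notifier defers the binding until it does.
 */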
static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	struct device_node *node = dev->of_node;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (of_device_is_compatible(node, "hisilicon,hns-nic-v1"))
		priv->enet_ver = AE_VERSION_1;
	else
		priv->enet_ver = AE_VERSION_2;

	ret = of_property_read_string(node, "ae-name", &priv->ae_name);
	if (ret)
		goto out_read_string_fail;

	ret = of_property_read_u32(node, "port-id", &priv->port_id);
	if (ret)
		goto out_read_string_fail;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		break;
	default:
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 32bit fail!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	setup_timer(&priv->service_timer, hns_nic_service_timer,
		    (unsigned long)priv);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "register notifier fail!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "has not handle, register notifier!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_string_fail:
	free_netdev(ndev);
	return ret;
}

static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (priv->phy)
		phy_disconnect(priv->phy);
	priv->phy = NULL;

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	free_netdev(ndev);
	return 0;
}

static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");