/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"
#include "hns_dsaf_mac.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))

static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management pkts */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
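			/* For TSO frames the values computed above describe
			 * the split the hardware must perform: mss is the gso
			 * size, l4_len the TCP header length, and paylen the
			 * payload left after SKB_TMP_LEN() bytes of headers.
			 * They stay zero for plain checksum-offload frames.
			 */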
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}

static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);

static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has no l3 checksum, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}

static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;

	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc(ring, priv,
			     (k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
			     dma + BD_MAX_SEND_SIZE * k,
			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
			     buf_num,
			     (type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
			     mtu);
}

netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
				struct sk_buff *skb,
				struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_ring *ring = ring_data->ring;
	struct device *dev = ring_to_dev(ring);
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* complete translating all packets */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	netif_trans_update(ndev);
	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}

/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
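 *
 * Return: the number of header bytes to pull into the linear part of the
 * skb, or @max_size if the headers cannot be fully parsed within that length.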
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
					unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* initialize network frame pointer */
	network = data;

	/* set first protocol and move network header forward */
	network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
		== HNS_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* relocate pointer to start of L4 header */
	network += hlen;

	/* finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}

static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize - pull_len);

	/* avoid re-using remote pages, flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}

static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}

static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
				struct sk_buff *skb, u32 flag)
{
	struct net_device *netdev = ring_data->napi.dev;
	u32 l3id;
	u32 l4id;

	/* check if RX checksum offload is enabled */
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		return;

	/* In hardware, we only support checksum for the following protocols:
	 * 1) IPv4,
	 * 2) TCP (over IPv4 or IPv6),
	 * 3) UDP (over IPv4 or IPv6),
	 * 4) SCTP (over IPv4 or IPv6)
	 * but we support many L3 (IPv4, IPv6, MPLS, PPPoE etc) and L4 (TCP,
	 * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
	 *
	 * Hardware limitation:
	 * Our present hardware RX descriptor lacks an L3/L4 checksum "Status &
	 * Error" bit (which usually can be used to indicate whether the
	 * checksum was calculated by the hardware and if there was any error
	 * encountered during checksum calculation).
	 *
	 * Software workaround:
	 * We do get info within the RX descriptor about the kind of L3/L4
	 * protocol coming in the packet and the error status. These errors
	 * might not just be checksum errors but could be related to version,
	 * length of IPv4, UDP, TCP etc.
	 * Because there is no way of knowing if it is an L3/L4 error due to a
	 * bad checksum or any other L3/L4 error, we will not (cannot) convey
	 * the checksum status for such cases to the upper stack and will not
	 * maintain the RX L3/L4 checksum counters either.
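	 *
	 * For example, an IPv4 TCP frame that arrives with HNS_RXD_L4E_B set
	 * is handed up with skb->ip_summed left at CHECKSUM_NONE, so the
	 * stack verifies the checksum itself.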
	 */

	l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
	l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);

	/* check L3 protocol for which checksum is supported */
	if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
		return;

	/* check for any (not just checksum) flagged L3 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
		return;

	/* we do not support checksum of fragmented packets */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
		return;

	/* check L4 protocol for which checksum is supported */
	if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
	    (l4id != HNS_RX_FLAG_L4ID_UDP) &&
	    (l4id != HNS_RX_FLAG_L4ID_SCTP))
		return;

	/* check for any (not just checksum) flagged L4 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
		return;

	/* now, this has to be a packet with valid RX checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* check for exceptions: free the skb and skip the descs */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	/* indicate to upper stack if our hardware has already calculated
	 * the RX checksum
	 */
	hns_nic_rx_checksum(ring_data, skb, bnum_flag);

	return 0;
}

static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all data has been written before submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

/* return error number for error or number of desc left to take
 */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
}

static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* skip the errored packet */
			recv_pkts++;
			continue;
		}

		/* hand the packet up to the IP stack */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
							ring_data, skb);
		recv_pkts++;
	}

out:
	/* make sure all data has been written before submit */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}

static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num = 0;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	/* for hardware bug fixed */
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		return false;
	} else {
		return true;
	}
}

static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (!num)
		return true;
	else
		return false;
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before calling this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
#else
#define NETIF_TX_LOCK(ring)
#define NETIF_TX_UNLOCK(ring)
#endif

/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ring);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touching any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ring);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ring);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	NETIF_TX_UNLOCK(ring);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		return false;
	} else {
		return true;
	}
}

static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head == ring->next_to_clean)
		return true;
	else
		return false;
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ring);

	head = ring->next_to_use; /* ntu: software-set ring position */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ring);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	int clean_complete = 0;
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	struct hnae_ring *ring = ring_data->ring;

try_again:
	clean_complete += ring_data->poll_one(
				ring_data, budget - clean_complete,
				ring_data->ex_process);

	if (clean_complete < budget) {
		if (ring_data->fini_process(ring_data)) {
			napi_complete(napi);
			ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
		} else {
			goto try_again;
		}
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 *hns_nic_adjust_link - adjust network mode by the phy status or new param
 *@ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	if (ndev->phydev) {
		h->dev->ops->adjust_link(h, ndev->phydev->speed,
					 ndev->phydev->duplex);
		state = ndev->phydev->link;
	}
	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}

/**
 *hns_nic_init_phy - init phy
 *@ndev: net device
 *@h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return -ENODEV;

	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set mac addr if it is configured, otherwise leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}

static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
				      struct hnae_ring *ring, cpumask_t *mask)
{
	int cpu;

	/* Different irq balance between 16-core and 32-core systems.
	 * The cpu mask is set by ring index according to the ring flag,
	 * which indicates whether the ring is a tx or an rx ring.
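	 *
	 * For example (following the mapping below): with 16 queues on a
	 * 32-core system, tx ring 3 is bound to cpu 6 and the matching rx
	 * ring to cpu 7; on a 16-core system both share cpu 3.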
	 */
	if (q_num == num_possible_cpus()) {
		if (is_tx_ring(ring))
			cpu = ring_idx;
		else
			cpu = ring_idx - q_num;
	} else {
		if (is_tx_ring(ring))
			cpu = ring_idx * 2;
		else
			cpu = (ring_idx - q_num) * 2 + 1;
	}

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);

	return cpu;
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;
	int cpu;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			return ret;
		}
		disable_irq(rd->ring->irq);

		cpu = hns_nic_init_affinity_mask(h->q_num, i,
						 rd->ring, &rd->mask);

		if (cpu_online(cpu))
			irq_set_affinity_hint(rd->ring->irq,
					      &rd->mask);

		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	return 0;
}

static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	enum hnae_port_type type = priv->ae_handle->port_type;

	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);

	/* Only do hns_nic_net_reset in debug mode
	 * because of hardware limitation.
	 */
	if (type == HNAE_PORT_DEBUG)
		hns_nic_net_reset(netdev);

	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	hns_tx_timeout_reset(priv);
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct phy_device *phy_dev = netdev->phydev;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
		napi_schedule(&priv->ring_data[i].napi);
	local_irq_restore(flags);
}
#endif

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	assert(skb->queue_mapping < priv->ae_handle->q_num);

	return hns_nic_net_xmit_hw(ndev, skb,
				   &tx_ring_data(priv, skb->queue_mapping));
}

static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
				  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

#define HNS_LB_TX_RING	0
static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	int frame_len;

	/* allocate test skb */
	skb = alloc_skb(64, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, 64);
	skb->dev = ndev;
	memset(skb->data, 0xFF, skb->len);

	/* must be a tcp/ip packet */
	ethhdr = (struct ethhdr *)skb->data;
	ethhdr->h_proto = htons(ETH_P_IP);

	frame_len = skb->len & (~1ul);
	memset(&skb->data[frame_len / 2], 0xAA,
	       frame_len / 2 - 1);

	skb->queue_mapping = HNS_LB_TX_RING;

	return skb;
}

static int hns_enable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
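	/* With the serdes looped back, the test frames built by
	 * hns_assemble_skb() are received on the local rx rings without
	 * needing a link partner.
	 */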
	int speed, duplex;
	int ret;

	ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
	if (ret)
		return ret;

	ret = ops->start ? ops->start(h) : 0;
	if (ret)
		return ret;

	/* link adjust duplex */
	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
		speed = 1000;
	else
		speed = 10000;
	duplex = 1;

	ops->adjust_link(h, speed, duplex);

	/* wait h/w ready */
	mdelay(300);

	return 0;
}

static void hns_disable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;

	ops->stop(h);
	ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
}

/**
 *hns_nic_clear_all_rx_fetch - clear the descriptors already fetched by the
 *chip. The function works as follows:
 * 1. if one rx ring has found the page_offset is not equal to 0 between head
 *    and tail, it means that the chip fetched the wrong descs for the ring
 *    whose buffer size is 4096.
 * 2. we set the chip serdes loopback and set rss indirection to the ring.
 * 3. construct 64-byte ip broadcast packets, wait for the associated rx ring
 *    to receive all the packets so that it will fetch new descriptors.
 * 4. recover to the original state.
 *
 *@ndev: net device
 */
static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	struct hns_nic_ring_data *rd;
	struct hnae_ring *ring;
	struct sk_buff *skb;
	u32 *org_indir;
	u32 *cur_indir;
	int indir_size;
	int head, tail;
	int fetch_num;
	int i, j;
	bool found;
	int retry_times;
	int ret = 0;

	/* alloc indir memory */
	indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
	org_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!org_indir)
		return -ENOMEM;

	/* store the original indirection */
	ops->get_rss(h, org_indir, NULL, NULL);

	cur_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!cur_indir) {
		ret = -ENOMEM;
		goto cur_indir_alloc_err;
	}

	/* set loopback */
	if (hns_enable_serdes_lb(ndev)) {
		ret = -EINVAL;
		goto enable_serdes_lb_err;
	}

	/* walk every rx ring to clear the fetched descs */
	for (i = 0; i < h->q_num; i++) {
		ring = &h->qs[i]->rx_ring;
		head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
		tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
		found = false;
		fetch_num = ring_dist(ring, head, tail);

		while (head != tail) {
			if (ring->desc_cb[head].page_offset != 0) {
				found = true;
				break;
			}

			head++;
			if (head == ring->desc_num)
				head = 0;
		}

		if (found) {
			for (j = 0; j < indir_size / sizeof(*org_indir); j++)
				cur_indir[j] = i;
			ops->set_rss(h, cur_indir, NULL, 0);

			for (j = 0; j < fetch_num; j++) {
				/* alloc one skb and init */
				skb = hns_assemble_skb(ndev);
				if (!skb)
					goto out;
				rd = &tx_ring_data(priv, skb->queue_mapping);
				hns_nic_net_xmit_hw(ndev, skb, rd);

				retry_times = 0;
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean rx */
					rd = &rx_ring_data(priv, i);
					if (rd->poll_one(rd, fetch_num,
							 hns_nic_drop_rx_fetch))
						break;
				}

				retry_times = 0;
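				/* reclaim the loopback frame from tx ring 0
				 * so its descriptor is returned to the ring
				 */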
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean the packet sent on tx ring 0 */
					rd = &tx_ring_data(priv,
							   HNS_LB_TX_RING);
					if (rd->poll_one(rd, fetch_num, NULL))
						break;
				}
			}
		}
	}

out:
	/* restore everything */
	ops->set_rss(h, org_indir, NULL, 0);
	hns_disable_serdes_lb(ndev);
enable_serdes_lb_err:
	kfree(cur_indir);
cur_indir_alloc_err:
	kfree(org_indir);

	return ret;
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	bool if_running = netif_running(ndev);
	int ret;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (new_mtu < 68)
		return -EINVAL;

	/* MTU no change */
	if (new_mtu == ndev->mtu)
		return 0;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (if_running) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);
	}

	if (priv->enet_ver != AE_VERSION_1 &&
	    ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
	    new_mtu > BD_SIZE_2048_MAX_MTU) {
		/* update desc */
		hnae_reinit_all_ring_desc(h);

		/* clear the packets which the chip has fetched */
		ret = hns_nic_clear_all_rx_fetch(ndev);

		/* the page offset must be consistent with the desc */
		hnae_reinit_all_ring_page_off(h);

		if (ret) {
			netdev_err(ndev, "clear the fetched desc fail\n");
			goto out;
		}
	}

	ret = h->dev->ops->set_mtu(h, new_mtu);
	if (ret) {
		netdev_err(ndev, "set mtu fail, return value %d\n",
			   ret);
		goto out;
	}

	/* finally, set new mtu to netdevice */
	ndev->mtu = new_mtu;

out:
	if (if_running) {
		if (hns_nic_net_open(ndev)) {
			netdev_err(ndev, "hns net open fail\n");
			ret = -EINVAL;
		}
	}

	return ret;
}

static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 does not support tso!\n");
		break;
	default:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* The chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		break;
	}
	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
		struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}
	return features;
}

static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->add_uc_addr)
		return h->dev->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns_nic_uc_unsync(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->rm_uc_addr)
		return h->dev->ops->rm_uc_addr(h, addr);

	return 0;
}

/**
 * hns_set_multicast_list - set the multicast mac address list
 * @ndev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->clr_mc_addr)
		if (h->dev->ops->clr_mc_addr(h))
			netdev_err(ndev, "clear multicast address fail\n");

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}

void hns_nic_set_rx_mode(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->set_promisc_mode) {
		if (ndev->flags & IFF_PROMISC)
			h->dev->ops->set_promisc_mode(h, 1);
		else
			h->dev->ops->set_promisc_mode(h, 0);
	}

	hns_set_multicast_list(ndev);

	if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
		netdev_err(ndev, "sync uc address fail\n");
}

static void hns_nic_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	int idx = 0;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;
}

static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
		     void *accel_priv, select_queue_fallback_t fallback)
{
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	/* fix hardware broadcast/multicast packets queue loopback */
	if (!AE_IS_VER1(priv->enet_ver) &&
	    is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;
	else
		return fallback(ndev, skb);
}

static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = hns_nic_poll_controller,
#endif
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
	.ndo_select_queue = hns_nic_select_queue,
};

static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	struct hnae_handle *h = priv->ae_handle;

	if (h->phy_dev) {
		if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
			return;

		(void)genphy_read_status(h->phy_dev);
	}
	hns_nic_adjust_link(netdev);
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}

/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));
"debug" : "service")); 2003 2004 rtnl_lock(); 2005 /* put off any impending NetWatchDogTimeout */ 2006 netif_trans_update(priv->netdev); 2007 hns_nic_net_reinit(priv->netdev); 2008 2009 rtnl_unlock(); 2010 } 2011 2012 /* for doing service complete*/ 2013 static void hns_nic_service_event_complete(struct hns_nic_priv *priv) 2014 { 2015 WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state)); 2016 /* make sure to commit the things */ 2017 smp_mb__before_atomic(); 2018 clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state); 2019 } 2020 2021 static void hns_nic_service_task(struct work_struct *work) 2022 { 2023 struct hns_nic_priv *priv 2024 = container_of(work, struct hns_nic_priv, service_task); 2025 struct hnae_handle *h = priv->ae_handle; 2026 2027 hns_nic_update_link_status(priv->netdev); 2028 h->dev->ops->update_led_status(h); 2029 hns_nic_update_stats(priv->netdev); 2030 2031 hns_nic_reset_subtask(priv); 2032 hns_nic_service_event_complete(priv); 2033 } 2034 2035 static void hns_nic_task_schedule(struct hns_nic_priv *priv) 2036 { 2037 if (!test_bit(NIC_STATE_DOWN, &priv->state) && 2038 !test_bit(NIC_STATE_REMOVING, &priv->state) && 2039 !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state)) 2040 (void)schedule_work(&priv->service_task); 2041 } 2042 2043 static void hns_nic_service_timer(unsigned long data) 2044 { 2045 struct hns_nic_priv *priv = (struct hns_nic_priv *)data; 2046 2047 (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ); 2048 2049 hns_nic_task_schedule(priv); 2050 } 2051 2052 /** 2053 * hns_tx_timeout_reset - initiate reset due to Tx timeout 2054 * @priv: driver private struct 2055 **/ 2056 static void hns_tx_timeout_reset(struct hns_nic_priv *priv) 2057 { 2058 /* Do the reset outside of interrupt context */ 2059 if (!test_bit(NIC_STATE_DOWN, &priv->state)) { 2060 set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state); 2061 netdev_warn(priv->netdev, 2062 "initiating reset due to tx timeout(%llu,0x%lx)\n", 2063 priv->tx_timeout_count, priv->state); 2064 priv->tx_timeout_count++; 2065 hns_nic_task_schedule(priv); 2066 } 2067 } 2068 2069 static int hns_nic_init_ring_data(struct hns_nic_priv *priv) 2070 { 2071 struct hnae_handle *h = priv->ae_handle; 2072 struct hns_nic_ring_data *rd; 2073 bool is_ver1 = AE_IS_VER1(priv->enet_ver); 2074 int i; 2075 2076 if (h->q_num > NIC_MAX_Q_PER_VF) { 2077 netdev_err(priv->netdev, "too much queue (%d)\n", h->q_num); 2078 return -EINVAL; 2079 } 2080 2081 priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2, 2082 GFP_KERNEL); 2083 if (!priv->ring_data) 2084 return -ENOMEM; 2085 2086 for (i = 0; i < h->q_num; i++) { 2087 rd = &priv->ring_data[i]; 2088 rd->queue_index = i; 2089 rd->ring = &h->qs[i]->tx_ring; 2090 rd->poll_one = hns_nic_tx_poll_one; 2091 rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : 2092 hns_nic_tx_fini_pro_v2; 2093 2094 netif_napi_add(priv->netdev, &rd->napi, 2095 hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); 2096 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; 2097 } 2098 for (i = h->q_num; i < h->q_num * 2; i++) { 2099 rd = &priv->ring_data[i]; 2100 rd->queue_index = i - h->q_num; 2101 rd->ring = &h->qs[i - h->q_num]->rx_ring; 2102 rd->poll_one = hns_nic_rx_poll_one; 2103 rd->ex_process = hns_nic_rx_up_pro; 2104 rd->fini_process = is_ver1 ? 
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	bool is_ver1 = AE_IS_VER1(priv->enet_ver);
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
			hns_nic_tx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
			hns_nic_rx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}

static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}

static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		if ((netdev->features & NETIF_F_TSO) ||
		    (netdev->features & NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* This chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		/* enable tso at init time;
		 * tso is then controlled per packet via the TSE bit in the bd
		 */
		h->dev->ops->set_tso_stats(h, 1);
	}
}

static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->fwnode, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = -ENODEV;
		dev_dbg(priv->dev, "no hnae handle yet, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "failed to probe PHY device!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "failed to register netdev!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}

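/*
 * Illustrative sketch (not from this file): how the per-version ops
 * installed by hns_nic_set_priv_ops() would typically be consumed on the
 * transmit path.  hns_nic_net_xmit() is defined elsewhere in the driver;
 * the outline below is a simplified, hypothetical example only, using the
 * callback signatures declared earlier in this file.
 *
 *	// inside a transmit routine:
 *	//	if (priv->ops.maybe_stop_tx(&skb, &buf_num, ring))
 *	//		return NETDEV_TX_BUSY;	// not enough descriptors
 *	//	priv->ops.fill_desc(ring, skb, size, dma, frag_end,
 *	//			    buf_num, DESC_TYPE_SKB, ndev->mtu);
 *
 * The indirection lets the v1 path (fill_desc), the v2 checksum path
 * (fill_v2_desc) and the TSO path (fill_tso_desc) share one xmit routine.
 */
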
static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	u32 port_id;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv),
				 NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (dev_of_node(dev)) {
		struct device_node *ae_node;

		if (of_device_is_compatible(dev->of_node,
					    "hisilicon,hns-nic-v1"))
			priv->enet_ver = AE_VERSION_1;
		else
			priv->enet_ver = AE_VERSION_2;

		ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
		if (IS_ERR_OR_NULL(ae_node)) {
			ret = PTR_ERR(ae_node);
			dev_err(dev, "cannot find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = &ae_node->fwnode;
	} else if (is_acpi_node(dev->fwnode)) {
		struct acpi_reference_args args;

		if (acpi_dev_found(hns_enet_acpi_match[0].id))
			priv->enet_ver = AE_VERSION_1;
		else if (acpi_dev_found(hns_enet_acpi_match[1].id))
			priv->enet_ver = AE_VERSION_2;
		else
			return -ENXIO;

		/* try to find port-idx-in-ae first */
		ret = acpi_node_get_property_reference(dev->fwnode,
						       "ae-handle", 0, &args);
		if (ret) {
			dev_err(dev, "cannot find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = acpi_fwnode_handle(args.adev);
	} else {
		dev_err(dev, "cannot read cfg data from OF or acpi\n");
		return -ENXIO;
	}

	ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
	if (ret) {
		/* fall back to "port-id" for compatibility with old code */
		ret = device_property_read_u32(dev, "port-id", &port_id);
		if (ret)
			goto out_read_prop_fail;
		/* for old dts, we need to calculate the port offset */
		port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
			: port_id - HNS_SRV_OFFSET;
	}
	priv->port_id = port_id;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	/* MTU range: 68 - 9578 (v1) or 9706 (v2) */
	ndev->min_mtu = MAC_MIN_MTU;
	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		ndev->max_mtu = MAC_MAX_MTU_V2 -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	default:
		ndev->max_mtu = MAC_MAX_MTU -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "failed to set mask to 64bit!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	setup_timer(&priv->service_timer, hns_nic_service_timer,
		    (unsigned long)priv);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

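	/* If the AE handle is not available yet, fall back to a notifier:
	 * hns_nic_notifier_action() retries hns_nic_try_get_ae() once the
	 * AE driver registers.
	 */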
	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "failed to register notifier!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "no hnae handle yet, notifier registered!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
	free_netdev(ndev);
	return ret;
}

static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	free_netdev(ndev);
	return 0;
}

static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
		.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");