// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"
#include <linux/bpf_trace.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/vmalloc.h>
#include <linux/ptp_classify.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <net/tso.h>

u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
{
	return enetc_port_rd(&si->hw, reg);
}
EXPORT_SYMBOL_GPL(enetc_port_mac_rd);

void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
{
	enetc_port_wr(&si->hw, reg, val);
	if (si->hw_features & ENETC_SI_F_QBU)
		enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val);
}
EXPORT_SYMBOL_GPL(enetc_port_mac_wr);

static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
					 u8 preemptible_tcs)
{
	priv->preemptible_tcs = preemptible_tcs;
	enetc_mm_commit_preemptible_tcs(priv);
}

static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
	int num_tx_rings = priv->num_tx_rings;

	if (priv->xdp_prog)
		return num_tx_rings - num_possible_cpus();

	return num_tx_rings;
}

static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
							struct enetc_bdr *tx_ring)
{
	int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;

	return priv->rx_ring[index];
}

static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
		return NULL;

	return tx_swbd->skb;
}

static struct xdp_frame *
enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->is_xdp_redirect)
		return tx_swbd->xdp_frame;

	return NULL;
}

static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
				struct enetc_tx_swbd *tx_swbd)
{
	/* For XDP_TX, pages come from RX, whereas for the other contexts
	 * where we have is_dma_page set, those come from skb_frag_dma_map.
	 * We need to differentiate these in order to unmap with the correct
	 * length.
	 */
	if (tx_swbd->is_dma_page)
		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
			       tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
			       tx_swbd->dir);
	else
		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
				 tx_swbd->len, tx_swbd->dir);
	tx_swbd->dma = 0;
}

static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
				struct enetc_tx_swbd *tx_swbd)
{
	struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
	struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);

	if (tx_swbd->dma)
		enetc_unmap_tx_buff(tx_ring, tx_swbd);

	if (xdp_frame) {
		xdp_return_frame(tx_swbd->xdp_frame);
		tx_swbd->xdp_frame = NULL;
	} else if (skb) {
		dev_kfree_skb_any(skb);
		tx_swbd->skb = NULL;
	}
}

/* Let H/W know BD ring has been updated */
static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
{
	/* includes wmb() */
	enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
}

static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
			   u8 *msgtype, u8 *twostep,
			   u16 *correction_offset, u16 *body_offset)
{
	unsigned int ptp_class;
	struct ptp_header *hdr;
	unsigned int type;
	u8 *base;

	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	type = ptp_class & PTP_CLASS_PMASK;
	if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
		*udp = 1;
	else
		*udp = 0;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);
	*twostep = hdr->flag_field[0] & 0x2;

	base = skb_mac_header(skb);
	*correction_offset = (u8 *)&hdr->correction - base;
	*body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;

	return 0;
}

static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
	bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
	struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
	struct enetc_hw *hw = &priv->si->hw;
	struct enetc_tx_swbd *tx_swbd;
	int len = skb_headlen(skb);
	union enetc_tx_bd temp_bd;
	u8 msgtype, twostep, udp;
	union enetc_tx_bd *txbd;
	u16 offset1, offset2;
	int i, count = 0;
	skb_frag_t *frag;
	unsigned int f;
	dma_addr_t dma;
	u8 flags = 0;

	i = tx_ring->next_to_use;
	txbd = ENETC_TXBD(*tx_ring, i);
	prefetchw(txbd);

	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
		goto dma_err;

	temp_bd.addr = cpu_to_le64(dma);
	temp_bd.buf_len = cpu_to_le16(len);
	temp_bd.lstatus = 0;

	tx_swbd = &tx_ring->tx_swbd[i];
	tx_swbd->dma = dma;
	tx_swbd->len = len;
	tx_swbd->is_dma_page = 0;
	tx_swbd->dir = DMA_TO_DEVICE;
	count++;

	do_vlan = skb_vlan_tag_present(skb);
	if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
		if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
				    &offset2) ||
		    msgtype != PTP_MSGTYPE_SYNC || twostep)
			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
		else
			do_onestep_tstamp = true;
	} else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
		do_twostep_tstamp = true;
	}

	tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
	tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
	tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;

	if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
		flags |= ENETC_TXBD_FLAGS_EX;

	if (tx_ring->tsd_enable)
		flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
	/* first BD needs frm_len and offload flags set */
	temp_bd.frm_len = cpu_to_le16(skb->len);
	temp_bd.flags = flags;

	if (flags & ENETC_TXBD_FLAGS_TSE)
		temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
							  flags);

	if (flags & ENETC_TXBD_FLAGS_EX) {
		u8 e_flags = 0;
		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		/* add extension BD for VLAN and/or timestamping */
		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}
		prefetchw(txbd);

		if (do_vlan) {
			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
			temp_bd.ext.tpid = 0; /* < C-TAG */
			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
		}

		if (do_onestep_tstamp) {
			u32 lo, hi, val;
			u64 sec, nsec;
			u8 *data;

			lo = enetc_rd_hot(hw, ENETC_SICTR0);
			hi = enetc_rd_hot(hw, ENETC_SICTR1);
			sec = (u64)hi << 32 | lo;
			nsec = do_div(sec, 1000000000);

			/* Configure extension BD */
			temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
			e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;

			/* Update originTimestamp field of Sync packet
			 * - 48 bits seconds field
			 * - 32 bits nanoseconds field
			 */
			data = skb_mac_header(skb);
			*(__be16 *)(data + offset2) =
				htons((sec >> 32) & 0xffff);
			*(__be32 *)(data + offset2 + 2) =
				htonl(sec & 0xffffffff);
			*(__be32 *)(data + offset2 + 6) = htonl(nsec);

			/* Configure single-step register */
			val = ENETC_PM0_SINGLE_STEP_EN;
			val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
			if (udp)
				val |= ENETC_PM0_SINGLE_STEP_CH;

			enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP,
					  val);
		} else if (do_twostep_tstamp) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
		}

		temp_bd.ext.e_flags = e_flags;
		count++;
	}

	frag = &skb_shinfo(skb)->frags[0];
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_err;

		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}
		prefetchw(txbd);

		temp_bd.addr = cpu_to_le64(dma);
		temp_bd.buf_len = cpu_to_le16(len);

		tx_swbd->dma = dma;
		tx_swbd->len = len;
		tx_swbd->is_dma_page = 1;
		tx_swbd->dir = DMA_TO_DEVICE;
		count++;
	}

	/* last BD needs 'F' bit set */
	flags |= ENETC_TXBD_FLAGS_F;
	temp_bd.flags = flags;
	*txbd = temp_bd;

	tx_ring->tx_swbd[i].is_eof = true;
	tx_ring->tx_swbd[i].skb = skb;

	enetc_bdr_idx_inc(tx_ring, &i);
	tx_ring->next_to_use = i;

	skb_tx_timestamp(skb);

	enetc_update_tx_ring_tail(tx_ring);

	return count;

dma_err:
	dev_err(tx_ring->dev, "DMA map error");

	do {
		tx_swbd = &tx_ring->tx_swbd[i];
		enetc_free_tx_frame(tx_ring, tx_swbd);
		if (i == 0)
			i = tx_ring->bd_count;
		i--;
	} while (count--);

	return 0;
}
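/* The TSO helpers below implement software segmentation: for each segment,
 * the driver builds one header in the tso_headers DMA region plus one or
 * more data BDs, and completes the IP and L4 checksums itself, since
 * segmentation done in software bypasses the usual checksum offload path.
 */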
static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
				 struct enetc_tx_swbd *tx_swbd,
				 union enetc_tx_bd *txbd, int *i, int hdr_len,
				 int data_len)
{
	union enetc_tx_bd txbd_tmp;
	u8 flags = 0, e_flags = 0;
	dma_addr_t addr;

	enetc_clear_tx_bd(&txbd_tmp);
	addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;

	if (skb_vlan_tag_present(skb))
		flags |= ENETC_TXBD_FLAGS_EX;

	txbd_tmp.addr = cpu_to_le64(addr);
	txbd_tmp.buf_len = cpu_to_le16(hdr_len);

	/* first BD needs frm_len and offload flags set */
	txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
	txbd_tmp.flags = flags;

	/* For the TSO header we do not set the dma address since we do not
	 * want it unmapped when we do cleanup. We still set len so that we
	 * count the bytes sent.
	 */
	tx_swbd->len = hdr_len;
	tx_swbd->do_twostep_tstamp = false;
	tx_swbd->check_wb = false;

	/* Actually write the header in the BD */
	*txbd = txbd_tmp;

	/* Add extension BD for VLAN */
	if (flags & ENETC_TXBD_FLAGS_EX) {
		/* Get the next BD */
		enetc_bdr_idx_inc(tx_ring, i);
		txbd = ENETC_TXBD(*tx_ring, *i);
		tx_swbd = &tx_ring->tx_swbd[*i];
		prefetchw(txbd);

		/* Setup the VLAN fields */
		enetc_clear_tx_bd(&txbd_tmp);
		txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
		txbd_tmp.ext.tpid = 0; /* < C-TAG */
		e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;

		/* Write the BD */
		txbd_tmp.ext.e_flags = e_flags;
		*txbd = txbd_tmp;
	}
}

static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
				 struct enetc_tx_swbd *tx_swbd,
				 union enetc_tx_bd *txbd, char *data,
				 int size, bool last_bd)
{
	union enetc_tx_bd txbd_tmp;
	dma_addr_t addr;
	u8 flags = 0;

	enetc_clear_tx_bd(&txbd_tmp);

	addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
		netdev_err(tx_ring->ndev, "DMA map error\n");
		return -ENOMEM;
	}

	if (last_bd) {
		flags |= ENETC_TXBD_FLAGS_F;
		tx_swbd->is_eof = 1;
	}

	txbd_tmp.addr = cpu_to_le64(addr);
	txbd_tmp.buf_len = cpu_to_le16(size);
	txbd_tmp.flags = flags;

	tx_swbd->dma = addr;
	tx_swbd->len = size;
	tx_swbd->dir = DMA_TO_DEVICE;

	*txbd = txbd_tmp;

	return 0;
}

static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
				 char *hdr, int hdr_len, int *l4_hdr_len)
{
	char *l4_hdr = hdr + skb_transport_offset(skb);
	int mac_hdr_len = skb_network_offset(skb);

	if (tso->tlen != sizeof(struct udphdr)) {
		struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);

		tcph->check = 0;
	} else {
		struct udphdr *udph = (struct udphdr *)(l4_hdr);

		udph->check = 0;
	}

	/* Compute the IP checksum. This is necessary since tso_build_hdr()
	 * already incremented the IP ID field.
	 */
	if (!tso->ipv6) {
		struct iphdr *iph = (void *)(hdr + mac_hdr_len);

		iph->check = 0;
		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	}

	/* Compute the checksum over the L4 header. */
	*l4_hdr_len = hdr_len - skb_transport_offset(skb);
	return csum_partial(l4_hdr, *l4_hdr_len, 0);
}
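/* Folds the running checksum started in enetc_tso_hdr_csum() into the final
 * L4 checksum. The caller accumulates each payload chunk with
 * csum_block_add(), passing the running byte offset so odd-length chunks are
 * combined correctly, and this helper then adds the pseudo-header and writes
 * the folded result into the TCP/UDP header.
 */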
static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
				    struct sk_buff *skb, char *hdr, int len,
				    __wsum sum)
{
	char *l4_hdr = hdr + skb_transport_offset(skb);
	__sum16 csum_final;

	/* Complete the L4 checksum by appending the pseudo-header to the
	 * already computed checksum.
	 */
	if (!tso->ipv6)
		csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					       ip_hdr(skb)->daddr,
					       len, ip_hdr(skb)->protocol, sum);
	else
		csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     len, ipv6_hdr(skb)->nexthdr, sum);

	if (tso->tlen != sizeof(struct udphdr)) {
		struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);

		tcph->check = csum_final;
	} else {
		struct udphdr *udph = (struct udphdr *)(l4_hdr);

		udph->check = csum_final;
	}
}

static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
	int hdr_len, total_len, data_len;
	struct enetc_tx_swbd *tx_swbd;
	union enetc_tx_bd *txbd;
	struct tso_t tso;
	__wsum csum, csum2;
	int count = 0, pos;
	int err, i, bd_data_num;

	/* Initialize the TSO handler, and prepare the first payload */
	hdr_len = tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	i = tx_ring->next_to_use;

	while (total_len > 0) {
		char *hdr;

		/* Get the BD */
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->tx_swbd[i];
		prefetchw(txbd);

		/* Determine the length of this packet */
		data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_len;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);

		/* compute the csum over the L4 header */
		csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
		enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
		bd_data_num = 0;
		count++;

		while (data_len > 0) {
			int size;

			size = min_t(int, tso.size, data_len);

			/* Advance the index in the BDR */
			enetc_bdr_idx_inc(tx_ring, &i);
			txbd = ENETC_TXBD(*tx_ring, i);
			tx_swbd = &tx_ring->tx_swbd[i];
			prefetchw(txbd);
			/* Compute the checksum over this segment of data and
			 * add it to the csum already computed (over the L4
			 * header and possible other data segments).
			 */
			csum2 = csum_partial(tso.data, size, 0);
			csum = csum_block_add(csum, csum2, pos);
			pos += size;

			err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
						    tso.data, size,
						    size == data_len);
			if (err)
				goto err_map_data;

			data_len -= size;
			count++;
			bd_data_num++;
			tso_build_data(skb, &tso, size);

			if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
				goto err_chained_bd;
		}

		enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);

		if (total_len == 0)
			tx_swbd->skb = skb;

		/* Go to the next BD */
		enetc_bdr_idx_inc(tx_ring, &i);
	}

	tx_ring->next_to_use = i;
	enetc_update_tx_ring_tail(tx_ring);

	return count;

err_map_data:
	dev_err(tx_ring->dev, "DMA map error");

err_chained_bd:
	do {
		tx_swbd = &tx_ring->tx_swbd[i];
		enetc_free_tx_frame(tx_ring, tx_swbd);
		if (i == 0)
			i = tx_ring->bd_count;
		i--;
	} while (count--);

	return 0;
}

static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_bdr *tx_ring;
	int count, err;

	/* Queue one-step Sync packet if already locked */
	if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
		if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
					  &priv->flags)) {
			skb_queue_tail(&priv->tx_skbs, skb);
			return NETDEV_TX_OK;
		}
	}

	tx_ring = priv->tx_ring[skb->queue_mapping];

	if (skb_is_gso(skb)) {
		if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
			netif_stop_subqueue(ndev, tx_ring->index);
			return NETDEV_TX_BUSY;
		}

		enetc_lock_mdio();
		count = enetc_map_tx_tso_buffs(tx_ring, skb);
		enetc_unlock_mdio();
	} else {
		if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
			if (unlikely(skb_linearize(skb)))
				goto drop_packet_err;

		count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
		if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
			netif_stop_subqueue(ndev, tx_ring->index);
			return NETDEV_TX_BUSY;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			err = skb_checksum_help(skb);
			if (err)
				goto drop_packet_err;
		}
		enetc_lock_mdio();
		count = enetc_map_tx_buffs(tx_ring, skb);
		enetc_unlock_mdio();
	}

	if (unlikely(!count))
		goto drop_packet_err;

	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
		netif_stop_subqueue(ndev, tx_ring->index);

	return NETDEV_TX_OK;

drop_packet_err:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	u8 udp, msgtype, twostep;
	u16 offset1, offset2;

	/* Mark the tx timestamp type on skb->cb[0] if required */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
		skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
	} else {
		skb->cb[0] = 0;
	}

	/* Fall back to two-step timestamping if this is not a one-step
	 * Sync packet
	 */
	if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
		if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
				    &offset1, &offset2) ||
		    msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
			skb->cb[0] = ENETC_F_TX_TSTAMP;
	}

	return enetc_start_xmit(skb, ndev);
}
EXPORT_SYMBOL_GPL(enetc_xmit);
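/* Hard IRQ handler: mask this vector's RX and TX ring interrupts and defer
 * all ring processing to NAPI. enetc_poll() re-arms the interrupt sources
 * once a poll cycle completes within budget.
 */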
static irqreturn_t enetc_msix(int irq, void *data)
{
	struct enetc_int_vector *v = data;
	int i;

	enetc_lock_mdio();

	/* disable interrupts */
	enetc_wr_reg_hot(v->rbier, 0);
	enetc_wr_reg_hot(v->ricr1, v->rx_ictt);

	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);

	enetc_unlock_mdio();

	napi_schedule(&v->napi);

	return IRQ_HANDLED;
}

static void enetc_rx_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct enetc_int_vector *v =
		container_of(dim, struct enetc_int_vector, rx_dim);

	v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
	dim->state = DIM_START_MEASURE;
}

static void enetc_rx_net_dim(struct enetc_int_vector *v)
{
	struct dim_sample dim_sample = {};

	v->comp_cnt++;

	if (!v->rx_napi_work)
		return;

	dim_update_sample(v->comp_cnt,
			  v->rx_ring.stats.packets,
			  v->rx_ring.stats.bytes,
			  &dim_sample);
	net_dim(&v->rx_dim, dim_sample);
}

static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
	int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;

	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}

static bool enetc_page_reusable(struct page *page)
{
	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
}

static void enetc_reuse_page(struct enetc_bdr *rx_ring,
			     struct enetc_rx_swbd *old)
{
	struct enetc_rx_swbd *new;

	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];

	/* next buf that may reuse a page */
	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);

	/* copy page reference */
	*new = *old;
}

static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
				u64 *tstamp)
{
	u32 lo, hi, tstamp_lo;

	lo = enetc_rd_hot(hw, ENETC_SICTR0);
	hi = enetc_rd_hot(hw, ENETC_SICTR1);
	tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
	/* The BD write-back only carries the low 32 counter bits; if the
	 * free-running counter has since wrapped past the captured value,
	 * the upper half read now is one ahead of the one that applied at
	 * capture time.
	 */
	if (lo <= tstamp_lo)
		hi -= 1;
	*tstamp = (u64)hi << 32 | tstamp_lo;
}

static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
{
	struct skb_shared_hwtstamps shhwtstamps;

	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
		skb_txtime_consumed(skb);
		skb_tstamp_tx(skb, &shhwtstamps);
	}
}
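/* XDP_TX buffers are RX pages mapped DMA_BIDIRECTIONAL, so on TX completion
 * they can be handed straight back to the RX ring without a dma_unmap and
 * dma_map cycle. If the RX ring has no free slot left, unmap and free the
 * page instead.
 */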
static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
				      struct enetc_tx_swbd *tx_swbd)
{
	struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
	struct enetc_rx_swbd rx_swbd = {
		.dma = tx_swbd->dma,
		.page = tx_swbd->page,
		.page_offset = tx_swbd->page_offset,
		.dir = tx_swbd->dir,
		.len = tx_swbd->len,
	};
	struct enetc_bdr *rx_ring;

	rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);

	if (likely(enetc_swbd_unused(rx_ring))) {
		enetc_reuse_page(rx_ring, &rx_swbd);

		/* sync for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
						 rx_swbd.page_offset,
						 ENETC_RXB_DMA_SIZE_XDP,
						 rx_swbd.dir);

		rx_ring->stats.recycles++;
	} else {
		/* RX ring is already full, we need to unmap and free the
		 * page, since there's nothing useful we can do with it.
		 */
		rx_ring->stats.recycle_failures++;

		dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
			       rx_swbd.dir);
		__free_page(rx_swbd.page);
	}

	rx_ring->xdp.xdp_tx_in_flight--;
}

static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
	int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
	struct net_device *ndev = tx_ring->ndev;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_tx_swbd *tx_swbd;
	int i, bds_to_clean;
	bool do_twostep_tstamp;
	u64 tstamp = 0;

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->tx_swbd[i];

	bds_to_clean = enetc_bd_ready_count(tx_ring, i);

	do_twostep_tstamp = false;

	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
		struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
		struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
		bool is_eof = tx_swbd->is_eof;

		if (unlikely(tx_swbd->check_wb)) {
			union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);

			if (txbd->flags & ENETC_TXBD_FLAGS_W &&
			    tx_swbd->do_twostep_tstamp) {
				enetc_get_tx_tstamp(&priv->si->hw, txbd,
						    &tstamp);
				do_twostep_tstamp = true;
			}

			if (tx_swbd->qbv_en &&
			    txbd->wb.status & ENETC_TXBD_STATS_WIN)
				tx_win_drop++;
		}

		if (tx_swbd->is_xdp_tx)
			enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
		else if (likely(tx_swbd->dma))
			enetc_unmap_tx_buff(tx_ring, tx_swbd);

		if (xdp_frame) {
			xdp_return_frame(xdp_frame);
		} else if (skb) {
			if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
				/* Start work to release the lock for the next
				 * one-step timestamping packet, and send one
				 * skb queued on tx_skbs, if any.
				 */
				schedule_work(&priv->tx_onestep_tstamp);
			} else if (unlikely(do_twostep_tstamp)) {
				enetc_tstamp_tx(skb, tstamp);
				do_twostep_tstamp = false;
			}
			napi_consume_skb(skb, napi_budget);
		}

		tx_byte_cnt += tx_swbd->len;
		/* Scrub the swbd here so we don't have to do that
		 * when we reuse it during xmit
		 */
		memset(tx_swbd, 0, sizeof(*tx_swbd));

		bds_to_clean--;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
		}

		/* BD iteration loop end */
		if (is_eof) {
			tx_frm_cnt++;
			/* re-arm interrupt source */
			enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
					 BIT(16 + tx_ring->index));
		}

		if (unlikely(!bds_to_clean))
			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
	}

	tx_ring->next_to_clean = i;
	tx_ring->stats.packets += tx_frm_cnt;
	tx_ring->stats.bytes += tx_byte_cnt;
	tx_ring->stats.win_drop += tx_win_drop;

	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
		netif_wake_subqueue(ndev, tx_ring->index);
	}

	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
}

static bool enetc_new_page(struct enetc_bdr *rx_ring,
			   struct enetc_rx_swbd *rx_swbd)
{
	bool xdp = !!(rx_ring->xdp.prog);
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	/* For XDP_TX, we forgo dma_unmap -> dma_map */
	rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
		__free_page(page);

		return false;
	}

	rx_swbd->dma = addr;
	rx_swbd->page = page;
	rx_swbd->page_offset = rx_ring->buffer_offset;

	return true;
}

static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_rx_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->rx_swbd[i];
	rxbd = enetc_rxbd(rx_ring, i);

	for (j = 0; j < buff_cnt; j++) {
		/* try reuse page */
		if (unlikely(!rx_swbd->page)) {
			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
				rx_ring->stats.rx_alloc_errs++;
				break;
			}
		}

		/* update RxBD */
		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
					   rx_swbd->page_offset);
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;

		enetc_rxbd_next(rx_ring, &rxbd, &i);
		rx_swbd = &rx_ring->rx_swbd[i];
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i; /* keep track for page reuse */
		rx_ring->next_to_use = i;

		/* update ENETC's consumer index */
		enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
	}

	return j;
}

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
				union enetc_rx_bd *rxbd,
				struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 lo, hi, tstamp_lo;
	u64 tstamp;

	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
		lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
		hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
		rxbd = enetc_rxbd_ext(rxbd);
		tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
		if (lo <= tstamp_lo)
			hi -= 1;

		tstamp = (u64)hi << 32 | tstamp_lo;
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
	}
}
#endif

static void enetc_get_offloads(struct enetc_bdr *rx_ring,
			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
	struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);

	/* TODO: hashing */
	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);

		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
		__be16 tpid = 0;

		switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
		case 0:
			tpid = htons(ETH_P_8021Q);
			break;
		case 1:
			tpid = htons(ETH_P_8021AD);
			break;
		case 2:
			tpid = htons(enetc_port_rd(&priv->si->hw,
						   ENETC_PCVLANR1));
			break;
		case 3:
			tpid = htons(enetc_port_rd(&priv->si->hw,
						   ENETC_PCVLANR2));
			break;
		default:
			break;
		}

		__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
	}

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (priv->active_offloads & ENETC_F_RX_TSTAMP)
		enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
#endif
}
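/* The RX path reuses pages through a half-page flipping scheme: each page is
 * split into two ENETC_RXB_TRUESIZE halves, and as long as the driver holds
 * the only reference to the page (and it is not pfmemalloc), a buffer is
 * recycled by flipping page_offset to the other half instead of allocating a
 * fresh page.
 */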
/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
 * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL
 * mapped buffers.
 */
static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
					       int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
				      rx_swbd->page_offset,
				      size, rx_swbd->dir);
	return rx_swbd;
}

/* Reuse the current page without performing half-page buffer flipping */
static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
			      struct enetc_rx_swbd *rx_swbd)
{
	size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;

	enetc_reuse_page(rx_ring, rx_swbd);

	dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
					 rx_swbd->page_offset,
					 buffer_size, rx_swbd->dir);

	rx_swbd->page = NULL;
}

/* Reuse the current page by performing half-page buffer flipping */
static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
			       struct enetc_rx_swbd *rx_swbd)
{
	if (likely(enetc_page_reusable(rx_swbd->page))) {
		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
		page_ref_inc(rx_swbd->page);

		enetc_put_rx_buff(rx_ring, rx_swbd);
	} else {
		dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
			       rx_swbd->dir);
		rx_swbd->page = NULL;
	}
}

static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
						int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	struct sk_buff *skb;
	void *ba;

	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
	skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
	if (unlikely(!skb)) {
		rx_ring->stats.rx_alloc_errs++;
		return NULL;
	}

	skb_reserve(skb, rx_ring->buffer_offset);
	__skb_put(skb, size);

	enetc_flip_rx_buff(rx_ring, rx_swbd);

	return skb;
}

static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
				     u16 size, struct sk_buff *skb)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);

	enetc_flip_rx_buff(rx_ring, rx_swbd);
}

static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
					      u32 bd_status,
					      union enetc_rx_bd **rxbd, int *i)
{
	if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
		return false;

	enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
	enetc_rxbd_next(rx_ring, rxbd, i);

	while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
		dma_rmb();
		bd_status = le32_to_cpu((*rxbd)->r.lstatus);

		enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
		enetc_rxbd_next(rx_ring, rxbd, i);
	}

	rx_ring->ndev->stats.rx_dropped++;
	rx_ring->ndev->stats.rx_errors++;

	return true;
}

static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
				       u32 bd_status, union enetc_rx_bd **rxbd,
				       int *i, int *cleaned_cnt, int buffer_size)
{
	struct sk_buff *skb;
	u16 size;

	size = le16_to_cpu((*rxbd)->r.buf_len);
	skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
	if (!skb)
		return NULL;

	enetc_get_offloads(rx_ring, *rxbd, skb);

	(*cleaned_cnt)++;

	enetc_rxbd_next(rx_ring, rxbd, i);
	/* not last BD in frame? */
	while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
		bd_status = le32_to_cpu((*rxbd)->r.lstatus);
		size = buffer_size;

		if (bd_status & ENETC_RXBD_LSTATUS_F) {
			dma_rmb();
			size = le16_to_cpu((*rxbd)->r.buf_len);
		}

		enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);

		(*cleaned_cnt)++;

		enetc_rxbd_next(rx_ring, rxbd, i);
	}

	skb_record_rx_queue(skb, rx_ring->index);
	skb->protocol = eth_type_trans(skb, rx_ring->ndev);

	return skb;
}

#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */

static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
			       struct napi_struct *napi, int work_limit)
{
	int rx_frm_cnt = 0, rx_byte_cnt = 0;
	int cleaned_cnt, i;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;

	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		struct sk_buff *skb;
		u32 bd_status;

		if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
			cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
							    cleaned_cnt);

		rxbd = enetc_rxbd(rx_ring, i);
		bd_status = le32_to_cpu(rxbd->r.lstatus);
		if (!bd_status)
			break;

		enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
		dma_rmb(); /* for reading other rxbd fields */

		if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
						      &rxbd, &i))
			break;

		skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
				      &cleaned_cnt, ENETC_RXB_DMA_SIZE);
		if (!skb)
			break;

		rx_byte_cnt += skb->len;
		rx_frm_cnt++;

		napi_gro_receive(napi, skb);
	}

	rx_ring->next_to_clean = i;

	rx_ring->stats.packets += rx_frm_cnt;
	rx_ring->stats.bytes += rx_byte_cnt;

	return rx_frm_cnt;
}

static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
				  struct enetc_tx_swbd *tx_swbd,
				  int frm_len)
{
	union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);

	prefetchw(txbd);

	enetc_clear_tx_bd(txbd);
	txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
	txbd->buf_len = cpu_to_le16(tx_swbd->len);
	txbd->frm_len = cpu_to_le16(frm_len);

	memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
}
/* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer
 * descriptors.
 */
static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
			 struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
{
	struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
	int i, k, frm_len = tmp_tx_swbd->len;

	if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
		return false;

	while (unlikely(!tmp_tx_swbd->is_eof)) {
		tmp_tx_swbd++;
		frm_len += tmp_tx_swbd->len;
	}

	i = tx_ring->next_to_use;

	for (k = 0; k < num_tx_swbd; k++) {
		struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];

		enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);

		/* last BD needs 'F' bit set */
		if (xdp_tx_swbd->is_eof) {
			union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);

			txbd->flags = ENETC_TXBD_FLAGS_F;
		}

		enetc_bdr_idx_inc(tx_ring, &i);
	}

	tx_ring->next_to_use = i;

	return true;
}

static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
					  struct enetc_tx_swbd *xdp_tx_arr,
					  struct xdp_frame *xdp_frame)
{
	struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
	struct skb_shared_info *shinfo;
	void *data = xdp_frame->data;
	int len = xdp_frame->len;
	skb_frag_t *frag;
	dma_addr_t dma;
	unsigned int f;
	int n = 0;

	dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
		netdev_err(tx_ring->ndev, "DMA map error\n");
		return -1;
	}

	xdp_tx_swbd->dma = dma;
	xdp_tx_swbd->dir = DMA_TO_DEVICE;
	xdp_tx_swbd->len = len;
	xdp_tx_swbd->is_xdp_redirect = true;
	xdp_tx_swbd->is_eof = false;
	xdp_tx_swbd->xdp_frame = NULL;

	n++;

	if (!xdp_frame_has_frags(xdp_frame))
		goto out;

	xdp_tx_swbd = &xdp_tx_arr[n];

	shinfo = xdp_get_shared_info_from_frame(xdp_frame);

	for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
	     f++, frag++) {
		data = skb_frag_address(frag);
		len = skb_frag_size(frag);

		dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
			/* Undo the DMA mapping for all fragments */
			while (--n >= 0)
				enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);

			netdev_err(tx_ring->ndev, "DMA map error\n");
			return -1;
		}

		xdp_tx_swbd->dma = dma;
		xdp_tx_swbd->dir = DMA_TO_DEVICE;
		xdp_tx_swbd->len = len;
		xdp_tx_swbd->is_xdp_redirect = true;
		xdp_tx_swbd->is_eof = false;
		xdp_tx_swbd->xdp_frame = NULL;

		n++;
		xdp_tx_swbd = &xdp_tx_arr[n];
	}
out:
	xdp_tx_arr[n - 1].is_eof = true;
	xdp_tx_arr[n - 1].xdp_frame = xdp_frame;

	return n;
}
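/* ndo_xdp_xmit callback: transmit redirected frames on this CPU's dedicated
 * XDP TX ring. The tail register is only written on XDP_XMIT_FLUSH or when a
 * frame fails to enqueue, batching doorbell updates across the frame array.
 */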
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
		   struct xdp_frame **frames, u32 flags)
{
	struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_bdr *tx_ring;
	int xdp_tx_bd_cnt, i, k;
	int xdp_tx_frm_cnt = 0;

	enetc_lock_mdio();

	tx_ring = priv->xdp_tx_ring[smp_processor_id()];

	prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));

	for (k = 0; k < num_frames; k++) {
		xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
							       xdp_redirect_arr,
							       frames[k]);
		if (unlikely(xdp_tx_bd_cnt < 0))
			break;

		if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
					   xdp_tx_bd_cnt))) {
			for (i = 0; i < xdp_tx_bd_cnt; i++)
				enetc_unmap_tx_buff(tx_ring,
						    &xdp_redirect_arr[i]);
			tx_ring->stats.xdp_tx_drops++;
			break;
		}

		xdp_tx_frm_cnt++;
	}

	if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
		enetc_update_tx_ring_tail(tx_ring);

	tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;

	enetc_unlock_mdio();

	return xdp_tx_frm_cnt;
}
EXPORT_SYMBOL_GPL(enetc_xdp_xmit);

static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
				     struct xdp_buff *xdp_buff, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;

	/* To be used for XDP_TX */
	rx_swbd->len = size;

	xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
			 rx_ring->buffer_offset, size, false);
}

static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
				     u16 size, struct xdp_buff *xdp_buff)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	skb_frag_t *frag;

	/* To be used for XDP_TX */
	rx_swbd->len = size;

	if (!xdp_buff_has_frags(xdp_buff)) {
		xdp_buff_set_frags_flag(xdp_buff);
		shinfo->xdp_frags_size = size;
		shinfo->nr_frags = 0;
	} else {
		shinfo->xdp_frags_size += size;
	}

	if (page_is_pfmemalloc(rx_swbd->page))
		xdp_buff_set_frag_pfmemalloc(xdp_buff);

	frag = &shinfo->frags[shinfo->nr_frags];
	skb_frag_off_set(frag, rx_swbd->page_offset);
	skb_frag_size_set(frag, size);
	__skb_frag_set_page(frag, rx_swbd->page);

	shinfo->nr_frags++;
}

static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
				 union enetc_rx_bd **rxbd, int *i,
				 int *cleaned_cnt, struct xdp_buff *xdp_buff)
{
	u16 size = le16_to_cpu((*rxbd)->r.buf_len);

	xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);

	enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
	(*cleaned_cnt)++;
	enetc_rxbd_next(rx_ring, rxbd, i);

	/* not last BD in frame? */
	while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
		bd_status = le32_to_cpu((*rxbd)->r.lstatus);
		size = ENETC_RXB_DMA_SIZE_XDP;

		if (bd_status & ENETC_RXBD_LSTATUS_F) {
			dma_rmb();
			size = le16_to_cpu((*rxbd)->r.buf_len);
		}

		enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
		(*cleaned_cnt)++;
		enetc_rxbd_next(rx_ring, rxbd, i);
	}
}
/* Convert RX buffer descriptors to TX buffer descriptors. These will be
 * recycled back into the RX ring in enetc_clean_tx_ring.
 */
static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
					struct enetc_bdr *rx_ring,
					int rx_ring_first, int rx_ring_last)
{
	int n = 0;

	for (; rx_ring_first != rx_ring_last;
	     n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
		struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];

		/* No need to dma_map, we already have DMA_BIDIRECTIONAL */
		tx_swbd->dma = rx_swbd->dma;
		tx_swbd->dir = rx_swbd->dir;
		tx_swbd->page = rx_swbd->page;
		tx_swbd->page_offset = rx_swbd->page_offset;
		tx_swbd->len = rx_swbd->len;
		tx_swbd->is_dma_page = true;
		tx_swbd->is_xdp_tx = true;
		tx_swbd->is_eof = false;
	}

	/* We rely on caller providing an rx_ring_last > rx_ring_first */
	xdp_tx_arr[n - 1].is_eof = true;

	return n;
}

static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
			   int rx_ring_last)
{
	while (rx_ring_first != rx_ring_last) {
		enetc_put_rx_buff(rx_ring,
				  &rx_ring->rx_swbd[rx_ring_first]);
		enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
	}
	rx_ring->stats.xdp_drops++;
}

static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
				   struct napi_struct *napi, int work_limit,
				   struct bpf_prog *prog)
{
	int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
	struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
	struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
	int rx_frm_cnt = 0, rx_byte_cnt = 0;
	struct enetc_bdr *tx_ring;
	int cleaned_cnt, i;
	u32 xdp_act;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;

	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd, *orig_rxbd;
		int orig_i, orig_cleaned_cnt;
		struct xdp_buff xdp_buff;
		struct sk_buff *skb;
		u32 bd_status;
		int err;

		rxbd = enetc_rxbd(rx_ring, i);
		bd_status = le32_to_cpu(rxbd->r.lstatus);
		if (!bd_status)
			break;

		enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
		dma_rmb(); /* for reading other rxbd fields */

		if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
						      &rxbd, &i))
			break;

		orig_rxbd = rxbd;
		orig_cleaned_cnt = cleaned_cnt;
		orig_i = i;

		enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
				     &cleaned_cnt, &xdp_buff);

		xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);

		switch (xdp_act) {
		default:
			bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
			fallthrough;
		case XDP_DROP:
			enetc_xdp_drop(rx_ring, orig_i, i);
			break;
		case XDP_PASS:
			rxbd = orig_rxbd;
			cleaned_cnt = orig_cleaned_cnt;
			i = orig_i;

			skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
					      &i, &cleaned_cnt,
					      ENETC_RXB_DMA_SIZE_XDP);
			if (unlikely(!skb))
				goto out;

			napi_gro_receive(napi, skb);
			break;
		case XDP_TX:
			tx_ring = priv->xdp_tx_ring[rx_ring->index];
			xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
								     rx_ring,
								     orig_i, i);

			if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
				enetc_xdp_drop(rx_ring, orig_i, i);
				tx_ring->stats.xdp_tx_drops++;
			} else {
				tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
				rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
				xdp_tx_frm_cnt++;
				/* The XDP_TX enqueue was successful, so we
				 * need to scrub the RX software BDs because
				 * the ownership of the buffers no longer
				 * belongs to the RX ring, and we must prevent
				 * enetc_refill_rx_ring() from reusing
				 * rx_swbd->page.
				 */
				while (orig_i != i) {
					rx_ring->rx_swbd[orig_i].page = NULL;
					enetc_bdr_idx_inc(rx_ring, &orig_i);
				}
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
			if (unlikely(err)) {
				enetc_xdp_drop(rx_ring, orig_i, i);
				rx_ring->stats.xdp_redirect_failures++;
			} else {
				while (orig_i != i) {
					enetc_flip_rx_buff(rx_ring,
							   &rx_ring->rx_swbd[orig_i]);
					enetc_bdr_idx_inc(rx_ring, &orig_i);
				}
				xdp_redirect_frm_cnt++;
				rx_ring->stats.xdp_redirect++;
			}
		}

		rx_frm_cnt++;
	}

out:
	rx_ring->next_to_clean = i;

	rx_ring->stats.packets += rx_frm_cnt;
	rx_ring->stats.bytes += rx_byte_cnt;

	if (xdp_redirect_frm_cnt)
		xdp_do_flush_map();

	if (xdp_tx_frm_cnt)
		enetc_update_tx_ring_tail(tx_ring);

	if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
		enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
				     rx_ring->xdp.xdp_tx_in_flight);

	return rx_frm_cnt;
}

static int enetc_poll(struct napi_struct *napi, int budget)
{
	struct enetc_int_vector
		*v = container_of(napi, struct enetc_int_vector, napi);
	struct enetc_bdr *rx_ring = &v->rx_ring;
	struct bpf_prog *prog;
	bool complete = true;
	int work_done;
	int i;

	enetc_lock_mdio();

	for (i = 0; i < v->count_tx_rings; i++)
		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
			complete = false;

	prog = rx_ring->xdp.prog;
	if (prog)
		work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
	else
		work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
	if (work_done == budget)
		complete = false;
	if (work_done)
		v->rx_napi_work = true;

	if (!complete) {
		enetc_unlock_mdio();
		return budget;
	}

	napi_complete_done(napi, work_done);

	if (likely(v->rx_dim_en))
		enetc_rx_net_dim(v);

	v->rx_napi_work = false;

	/* enable interrupts */
	enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);

	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
				 ENETC_TBIER_TXTIE);

	enetc_unlock_mdio();

	return work_done;
}
/* Probing and Init */
#define ENETC_MAX_RFS_SIZE 64
void enetc_get_si_caps(struct enetc_si *si)
{
	struct enetc_hw *hw = &si->hw;
	u32 val;

	/* find out how many of various resources we have to work with */
	val = enetc_rd(hw, ENETC_SICAPR0);
	si->num_rx_rings = (val >> 16) & 0xff;
	si->num_tx_rings = val & 0xff;

	val = enetc_rd(hw, ENETC_SIRFSCAPR);
	si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
	si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);

	si->num_rss = 0;
	val = enetc_rd(hw, ENETC_SIPCAPR0);
	if (val & ENETC_SIPCAPR0_RSS) {
		u32 rss;

		rss = enetc_rd(hw, ENETC_SIRSSCAPR);
		si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
	}

	if (val & ENETC_SIPCAPR0_QBV)
		si->hw_features |= ENETC_SI_F_QBV;

	if (val & ENETC_SIPCAPR0_QBU)
		si->hw_features |= ENETC_SI_F_QBU;

	if (val & ENETC_SIPCAPR0_PSFP)
		si->hw_features |= ENETC_SI_F_PSFP;
}
EXPORT_SYMBOL_GPL(enetc_get_si_caps);

static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res)
{
	size_t bd_base_size = res->bd_count * res->bd_size;

	res->bd_base = dma_alloc_coherent(res->dev, bd_base_size,
					  &res->bd_dma_base, GFP_KERNEL);
	if (!res->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(res->bd_dma_base, 128)) {
		dma_free_coherent(res->dev, bd_base_size, res->bd_base,
				  res->bd_dma_base);
		return -EINVAL;
	}

	return 0;
}

static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res)
{
	size_t bd_base_size = res->bd_count * res->bd_size;

	dma_free_coherent(res->dev, bd_base_size, res->bd_base,
			  res->bd_dma_base);
}

static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res,
				   struct device *dev, size_t bd_count)
{
	int err;

	res->dev = dev;
	res->bd_count = bd_count;
	res->bd_size = sizeof(union enetc_tx_bd);

	res->tx_swbd = vzalloc(bd_count * sizeof(*res->tx_swbd));
	if (!res->tx_swbd)
		return -ENOMEM;

	err = enetc_dma_alloc_bdr(res);
	if (err)
		goto err_alloc_bdr;

	res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE,
					      &res->tso_headers_dma,
					      GFP_KERNEL);
	if (!res->tso_headers) {
		err = -ENOMEM;
		goto err_alloc_tso;
	}

	return 0;

err_alloc_tso:
	enetc_dma_free_bdr(res);
err_alloc_bdr:
	vfree(res->tx_swbd);
	res->tx_swbd = NULL;

	return err;
}

static void enetc_free_tx_resource(const struct enetc_bdr_resource *res)
{
	dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE,
			  res->tso_headers, res->tso_headers_dma);
	enetc_dma_free_bdr(res);
	vfree(res->tx_swbd);
}

static struct enetc_bdr_resource *
enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
{
	struct enetc_bdr_resource *tx_res;
	int i, err;

	tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL);
	if (!tx_res)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < priv->num_tx_rings; i++) {
		struct enetc_bdr *tx_ring = priv->tx_ring[i];

		err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev,
					      tx_ring->bd_count);
		if (err)
			goto fail;
	}

	return tx_res;

fail:
	while (i-- > 0)
		enetc_free_tx_resource(&tx_res[i]);

	kfree(tx_res);

	return ERR_PTR(err);
}

static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res,
				    size_t num_resources)
{
	size_t i;

	for (i = 0; i < num_resources; i++)
		enetc_free_tx_resource(&tx_res[i]);

	kfree(tx_res);
}
static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res,
				   struct device *dev, size_t bd_count,
				   bool extended)
{
	int err;

	res->dev = dev;
	res->bd_count = bd_count;
	res->bd_size = sizeof(union enetc_rx_bd);
	if (extended)
		res->bd_size *= 2;

	res->rx_swbd = vzalloc(bd_count * sizeof(struct enetc_rx_swbd));
	if (!res->rx_swbd)
		return -ENOMEM;

	err = enetc_dma_alloc_bdr(res);
	if (err) {
		vfree(res->rx_swbd);
		return err;
	}

	return 0;
}

static void enetc_free_rx_resource(const struct enetc_bdr_resource *res)
{
	enetc_dma_free_bdr(res);
	vfree(res->rx_swbd);
}

static struct enetc_bdr_resource *
enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended)
{
	struct enetc_bdr_resource *rx_res;
	int i, err;

	rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL);
	if (!rx_res)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < priv->num_rx_rings; i++) {
		struct enetc_bdr *rx_ring = priv->rx_ring[i];

		err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev,
					      rx_ring->bd_count, extended);
		if (err)
			goto fail;
	}

	return rx_res;

fail:
	while (i-- > 0)
		enetc_free_rx_resource(&rx_res[i]);

	kfree(rx_res);

	return ERR_PTR(err);
}

static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res,
				    size_t num_resources)
{
	size_t i;

	for (i = 0; i < num_resources; i++)
		enetc_free_rx_resource(&rx_res[i]);

	kfree(rx_res);
}

static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring,
				     const struct enetc_bdr_resource *res)
{
	tx_ring->bd_base = res ? res->bd_base : NULL;
	tx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
	tx_ring->tx_swbd = res ? res->tx_swbd : NULL;
	tx_ring->tso_headers = res ? res->tso_headers : NULL;
	tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0;
}

static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring,
				     const struct enetc_bdr_resource *res)
{
	rx_ring->bd_base = res ? res->bd_base : NULL;
	rx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
	rx_ring->rx_swbd = res ? res->rx_swbd : NULL;
}

static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv,
				      const struct enetc_bdr_resource *res)
{
	int i;

	if (priv->tx_res)
		enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings);

	for (i = 0; i < priv->num_tx_rings; i++) {
		enetc_assign_tx_resource(priv->tx_ring[i],
					 res ? &res[i] : NULL);
	}

	priv->tx_res = res;
}
static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv,
				      const struct enetc_bdr_resource *res)
{
	int i;

	if (priv->rx_res)
		enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings);

	for (i = 0; i < priv->num_rx_rings; i++) {
		enetc_assign_rx_resource(priv->rx_ring[i],
					 res ? &res[i] : NULL);
	}

	priv->rx_res = res;
}

static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
{
	int i;

	for (i = 0; i < tx_ring->bd_count; i++) {
		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];

		enetc_free_tx_frame(tx_ring, tx_swbd);
	}
}

static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
	int i;

	for (i = 0; i < rx_ring->bd_count; i++) {
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

		if (!rx_swbd->page)
			continue;

		dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
			       rx_swbd->dir);
		__free_page(rx_swbd->page);
		rx_swbd->page = NULL;
	}
}

static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_free_rx_ring(priv->rx_ring[i]);

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_free_tx_ring(priv->tx_ring[i]);
}

static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
{
	int *rss_table;
	int i;

	rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
	if (!rss_table)
		return -ENOMEM;

	/* Set up RSS table defaults */
	for (i = 0; i < si->num_rss; i++)
		rss_table[i] = i % num_groups;

	enetc_set_rss_table(si, rss_table, si->num_rss);

	kfree(rss_table);

	return 0;
}

int enetc_configure_si(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;
	struct enetc_hw *hw = &si->hw;
	int err;

	/* set SI cache attributes */
	enetc_wr(hw, ENETC_SICAR0,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
	enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
	/* enable SI */
	enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);

	if (si->num_rss) {
		err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_configure_si);
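/* Default ring and interrupt layout: one interrupt vector, and thus one RX
 * ring, per online CPU (bounded by what the SI provides), while all available
 * TX rings are enabled so that every priority can get its own queue.
 */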
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;

	priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
				  GFP_KERNEL);
	if (!priv->cls_rules)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_alloc_si_resources);

void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
	kfree(priv->cls_rules);
}
EXPORT_SYMBOL_GPL(enetc_free_si_resources);

static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	u32 tbmr;

	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits(tx_ring->bd_dma_base));

	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits(tx_ring->bd_dma_base));

	WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	/* clearing PI/CI registers for Tx not supported, adjust sw indexes */
	tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
	tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);

	/* enable Tx ints by setting pkt thr to 1 */
	enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);

	tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio);
	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tbmr |= ENETC_TBMR_VIH;

	/* enable ring */
	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);

	tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
	tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
	tx_ring->idr = hw->reg + ENETC_SITXIDR;
}

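/* Note the asymmetry with enetc_setup_txbdr() above: the RX producer and
 * consumer indexes can be reset, so enetc_setup_rxbdr() below starts the
 * ring from scratch, whereas the TX ring must adopt whatever indexes the
 * hardware currently reports.
 */
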
static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
			      bool extended)
{
	int idx = rx_ring->index;
	u32 rbmr = 0;

	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits(rx_ring->bd_dma_base));

	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits(rx_ring->bd_dma_base));

	WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	if (rx_ring->xdp.prog)
		enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
	else
		enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);

	/* Also prepare the consumer index in case page allocation never
	 * succeeds. In that case, hardware will never advance producer index
	 * to match consumer index, and will drop all frames.
	 */
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
	enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);

	/* enable Rx ints by setting pkt thr to 1 */
	enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);

	rx_ring->ext_en = extended;
	if (rx_ring->ext_en)
		rbmr |= ENETC_RBMR_BDS;

	if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rbmr |= ENETC_RBMR_VTE;

	rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
	rx_ring->idr = hw->reg + ENETC_SIRXIDR;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->next_to_alloc = 0;

	enetc_lock_mdio();
	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
	enetc_unlock_mdio();

	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}

static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended)
{
	struct enetc_hw *hw = &priv->si->hw;
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_setup_txbdr(hw, priv->tx_ring[i]);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_setup_rxbdr(hw, priv->rx_ring[i], extended);
}

static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	u32 tbmr;

	tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
	tbmr |= ENETC_TBMR_EN;
	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
}

static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
	int idx = rx_ring->index;
	u32 rbmr;

	rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
	rbmr |= ENETC_RBMR_EN;
	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}

static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_enable_txbdr(hw, priv->tx_ring[i]);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}

static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
	int idx = rx_ring->index;

	/* disable EN bit on ring */
	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
}

static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;

	/* disable EN bit on ring */
	enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
}

static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_disable_txbdr(hw, priv->tx_ring[i]);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_disable_rxbdr(hw, priv->rx_ring[i]);
}

static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int delay = 8, timeout = 100;
	int idx = tx_ring->index;

	/* wait for busy to clear */
	while (delay < timeout &&
	       enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
		msleep(delay);
		delay *= 2;
	}

	if (delay >= timeout)
		netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
			    idx);
}

static void enetc_wait_bdrs(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_wait_txbdr(hw, priv->tx_ring[i]);
}

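/* Each Rx/Tx interrupt vector below gets its own MSI-X entry, starting at
 * ENETC_BDR_INT_BASE_IDX. ENETC_SIMSIRRV(i) steers RX ring i to that entry
 * and ENETC_SIMSITRV(idx) does the same for every TX ring owned by the
 * vector, with the IRQ affinity hint spread round-robin across the online
 * CPUs.
 */
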
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
	struct pci_dev *pdev = priv->si->pdev;
	struct enetc_hw *hw = &priv->si->hw;
	int i, j, err;

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
		struct enetc_int_vector *v = priv->int_vector[i];
		int entry = ENETC_BDR_INT_BASE_IDX + i;

		snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
			 priv->ndev->name, i);
		err = request_irq(irq, enetc_msix, 0, v->name, v);
		if (err) {
			dev_err(priv->dev, "request_irq() failed!\n");
			goto irq_err;
		}
		disable_irq(irq);

		v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
		v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
		v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);

		enetc_wr(hw, ENETC_SIMSIRRV(i), entry);

		for (j = 0; j < v->count_tx_rings; j++) {
			int idx = v->tx_ring[j].index;

			enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
		}
		irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
	}

	return 0;

irq_err:
	while (i--) {
		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);

		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, priv->int_vector[i]);
	}

	return err;
}

static void enetc_free_irqs(struct enetc_ndev_priv *priv)
{
	struct pci_dev *pdev = priv->si->pdev;
	int i;

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);

		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, priv->int_vector[i]);
	}
}

static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	u32 icpt, ictt;
	int i;

	/* enable Tx & Rx event indication */
	if (priv->ic_mode &
	    (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
		icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
		/* init to non-0 minimum, will be adjusted later */
		ictt = 0x1;
	} else {
		icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
		ictt = 0;
	}

	for (i = 0; i < priv->num_rx_rings; i++) {
		enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
		enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
		enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
	}

	if (priv->ic_mode & ENETC_IC_TX_MANUAL)
		icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
	else
		icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */

	for (i = 0; i < priv->num_tx_rings; i++) {
		enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
		enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
		enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
	}
}

static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
}

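/* Without a phylink instance (e.g. a port with no attached PHY), there is
 * no link state machine to drive the carrier, so enetc_phylink_connect()
 * below simply forces the carrier on in that phy-less mode.
 */
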
static int enetc_phylink_connect(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct ethtool_eee edata;
	int err;

	if (!priv->phylink) {
		/* phy-less mode */
		netif_carrier_on(ndev);
		return 0;
	}

	err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
	if (err) {
		dev_err(&ndev->dev, "could not attach to PHY\n");
		return err;
	}

	/* disable EEE autoneg, until ENETC driver supports it */
	memset(&edata, 0, sizeof(struct ethtool_eee));
	phylink_ethtool_set_eee(priv->phylink, &edata);

	phylink_start(priv->phylink);

	return 0;
}

static void enetc_tx_onestep_tstamp(struct work_struct *work)
{
	struct enetc_ndev_priv *priv;
	struct sk_buff *skb;

	priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);

	netif_tx_lock_bh(priv->ndev);

	clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
	skb = skb_dequeue(&priv->tx_skbs);
	if (skb)
		enetc_start_xmit(skb, priv->ndev);

	netif_tx_unlock_bh(priv->ndev);
}

static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
{
	INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
	skb_queue_head_init(&priv->tx_skbs);
}

void enetc_start(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i;

	enetc_setup_interrupts(priv);

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(priv->si->pdev,
					 ENETC_BDR_INT_BASE_IDX + i);

		napi_enable(&priv->int_vector[i]->napi);
		enable_irq(irq);
	}

	enetc_enable_bdrs(priv);

	netif_tx_start_all_queues(ndev);
}
EXPORT_SYMBOL_GPL(enetc_start);

int enetc_open(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_bdr_resource *tx_res, *rx_res;
	bool extended;
	int err;

	extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);

	err = enetc_setup_irqs(priv);
	if (err)
		return err;

	err = enetc_phylink_connect(ndev);
	if (err)
		goto err_phy_connect;

	tx_res = enetc_alloc_tx_resources(priv);
	if (IS_ERR(tx_res)) {
		err = PTR_ERR(tx_res);
		goto err_alloc_tx;
	}

	rx_res = enetc_alloc_rx_resources(priv, extended);
	if (IS_ERR(rx_res)) {
		err = PTR_ERR(rx_res);
		goto err_alloc_rx;
	}

	enetc_tx_onestep_tstamp_init(priv);
	enetc_assign_tx_resources(priv, tx_res);
	enetc_assign_rx_resources(priv, rx_res);
	enetc_setup_bdrs(priv, extended);
	enetc_start(ndev);

	return 0;

err_alloc_rx:
	enetc_free_tx_resources(tx_res, priv->num_tx_rings);
err_alloc_tx:
	if (priv->phylink)
		phylink_disconnect_phy(priv->phylink);
err_phy_connect:
	enetc_free_irqs(priv);

	return err;
}
EXPORT_SYMBOL_GPL(enetc_open);

void enetc_stop(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i;

	netif_tx_stop_all_queues(ndev);

	enetc_disable_bdrs(priv);

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(priv->si->pdev,
					 ENETC_BDR_INT_BASE_IDX + i);

		disable_irq(irq);
		napi_synchronize(&priv->int_vector[i]->napi);
		napi_disable(&priv->int_vector[i]->napi);
	}

	enetc_wait_bdrs(priv);

	enetc_clear_interrupts(priv);
}
EXPORT_SYMBOL_GPL(enetc_stop);

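/* Beyond enetc_stop(), enetc_close() below also releases the ring buffers
 * and commits NULL resources, which both frees the previously assigned
 * descriptor memory and clears the rings' now-dangling pointers to it.
 */
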
int enetc_close(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

	enetc_stop(ndev);

	if (priv->phylink) {
		phylink_stop(priv->phylink);
		phylink_disconnect_phy(priv->phylink);
	} else {
		netif_carrier_off(ndev);
	}

	enetc_free_rxtx_rings(priv);

	/* Avoids dangling pointers and also frees old resources */
	enetc_assign_rx_resources(priv, NULL);
	enetc_assign_tx_resources(priv, NULL);

	enetc_free_irqs(priv);

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_close);

static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
			     int (*cb)(struct enetc_ndev_priv *priv, void *ctx),
			     void *ctx)
{
	struct enetc_bdr_resource *tx_res, *rx_res;
	int err;

	ASSERT_RTNL();

	/* If the interface is down, run the callback right away,
	 * without reconfiguration.
	 */
	if (!netif_running(priv->ndev)) {
		if (cb) {
			err = cb(priv, ctx);
			if (err)
				return err;
		}

		return 0;
	}

	tx_res = enetc_alloc_tx_resources(priv);
	if (IS_ERR(tx_res)) {
		err = PTR_ERR(tx_res);
		goto out;
	}

	rx_res = enetc_alloc_rx_resources(priv, extended);
	if (IS_ERR(rx_res)) {
		err = PTR_ERR(rx_res);
		goto out_free_tx_res;
	}

	enetc_stop(priv->ndev);
	enetc_free_rxtx_rings(priv);

	/* Interface is down, run optional callback now */
	if (cb) {
		err = cb(priv, ctx);
		if (err)
			goto out_restart;
	}

	enetc_assign_tx_resources(priv, tx_res);
	enetc_assign_rx_resources(priv, rx_res);
	enetc_setup_bdrs(priv, extended);
	enetc_start(priv->ndev);

	return 0;

out_restart:
	enetc_setup_bdrs(priv, extended);
	enetc_start(priv->ndev);
	enetc_free_rx_resources(rx_res, priv->num_rx_rings);
out_free_tx_res:
	enetc_free_tx_resources(tx_res, priv->num_tx_rings);
out:
	return err;
}

static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i,
			   priv->tx_ring[i]->prio);
}

static void enetc_reset_tc_mqprio(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	struct enetc_bdr *tx_ring;
	int num_stack_tx_queues;
	int i;

	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);

	netdev_reset_tc(ndev);
	netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
	priv->min_num_stack_tx_queues = num_possible_cpus();

	/* Reset all ring priorities to 0 */
	for (i = 0; i < priv->num_tx_rings; i++) {
		tx_ring = priv->tx_ring[i];
		tx_ring->prio = 0;
		enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
	}

	enetc_debug_tx_ring_prios(priv);

	enetc_change_preemptible_tcs(priv, 0);
}

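/* Sketch of the mapping established below for a hypothetical
 * "tc qdisc add dev eth0 root mqprio num_tc 2 queues 2@0 2@2 hw 1":
 * TX rings 0-1 get hardware priority 0, rings 2-3 get priority 1, and
 * the stack is told it has 4 real TX queues.
 */
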
int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_mqprio_qopt *qopt = &mqprio->qopt;
	struct enetc_hw *hw = &priv->si->hw;
	int num_stack_tx_queues = 0;
	struct enetc_bdr *tx_ring;
	u8 num_tc = qopt->num_tc;
	int offset, count;
	int err, tc, q;

	if (!num_tc) {
		enetc_reset_tc_mqprio(ndev);
		return 0;
	}

	err = netdev_set_num_tc(ndev, num_tc);
	if (err)
		return err;

	for (tc = 0; tc < num_tc; tc++) {
		offset = qopt->offset[tc];
		count = qopt->count[tc];
		num_stack_tx_queues += count;

		err = netdev_set_tc_queue(ndev, tc, count, offset);
		if (err)
			goto err_reset_tc;

		for (q = offset; q < offset + count; q++) {
			tx_ring = priv->tx_ring[q];
			/* The prio_tc_map is skb_tx_hash()'s way of selecting
			 * between TX queues based on skb->priority. As such,
			 * there's nothing to offload based on it.
			 * Make the mqprio "traffic class" be the priority of
			 * this ring group, and leave the Tx IPV to traffic
			 * class mapping as its default mapping value of 1:1.
			 */
			tx_ring->prio = tc;
			enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
		}
	}

	err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
	if (err)
		goto err_reset_tc;

	priv->min_num_stack_tx_queues = num_stack_tx_queues;

	enetc_debug_tx_ring_prios(priv);

	enetc_change_preemptible_tcs(priv, mqprio->preemptible_tcs);

	return 0;

err_reset_tc:
	enetc_reset_tc_mqprio(ndev);
	return err;
}
EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);

static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
{
	struct bpf_prog *old_prog, *prog = ctx;
	int num_stack_tx_queues;
	int err, i;

	old_prog = xchg(&priv->xdp_prog, prog);

	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
	err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
	if (err) {
		xchg(&priv->xdp_prog, old_prog);
		return err;
	}

	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < priv->num_rx_rings; i++) {
		struct enetc_bdr *rx_ring = priv->rx_ring[i];

		rx_ring->xdp.prog = prog;

		if (prog)
			rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
		else
			rx_ring->buffer_offset = ENETC_RXB_PAD;
	}

	return 0;
}

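/* Attaching a program reserves the last num_possible_cpus() TX rings for
 * XDP_TX/XDP_REDIRECT (see enetc_num_stack_tx_queues()), so the check
 * below rejects programs that would not leave the stack its minimum share
 * of TX queues.
 */
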
static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
				struct netlink_ext_ack *extack)
{
	int num_xdp_tx_queues = prog ? num_possible_cpus() : 0;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	bool extended;

	if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
	    priv->num_tx_rings) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
				       num_xdp_tx_queues,
				       priv->min_num_stack_tx_queues,
				       priv->num_tx_rings);
		return -EBUSY;
	}

	extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);

	/* The buffer layout is changing, so we need to drain the old
	 * RX buffers and seed new ones.
	 */
	return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog);
}

int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack);
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_setup_bpf);

struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned long packets = 0, bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < priv->num_rx_rings; i++) {
		packets += priv->rx_ring[i]->stats.packets;
		bytes += priv->rx_ring[i]->stats.bytes;
	}

	stats->rx_packets = packets;
	stats->rx_bytes = bytes;
	bytes = 0;
	packets = 0;

	for (i = 0; i < priv->num_tx_rings; i++) {
		packets += priv->tx_ring[i]->stats.packets;
		bytes += priv->tx_ring[i]->stats.bytes;
		tx_dropped += priv->tx_ring[i]->stats.win_drop;
	}

	stats->tx_packets = packets;
	stats->tx_bytes = bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}
EXPORT_SYMBOL_GPL(enetc_get_stats);

static int enetc_set_rss(struct net_device *ndev, int en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 reg;

	enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);

	reg = enetc_rd(hw, ENETC_SIMR);
	reg &= ~ENETC_SIMR_RSSE;
	reg |= (en) ? ENETC_SIMR_RSSE : 0;
	enetc_wr(hw, ENETC_SIMR, reg);

	return 0;
}

static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_bdr_enable_rxvlan(hw, i, en);
}

static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_bdr_enable_txvlan(hw, i, en);
}

void enetc_set_features(struct net_device *ndev, netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_RXHASH)
		enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		enetc_enable_rxvlan(ndev,
				    !!(features & NETIF_F_HW_VLAN_CTAG_RX));

	if (changed & NETIF_F_HW_VLAN_CTAG_TX)
		enetc_enable_txvlan(ndev,
				    !!(features & NETIF_F_HW_VLAN_CTAG_TX));
}
EXPORT_SYMBOL_GPL(enetc_set_features);

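/* TX timestamping modes are mutually exclusive, which is why each case in
 * enetc_hwtstamp_set() below first clears ENETC_F_TX_TSTAMP_MASK: two-step
 * (ENETC_F_TX_TSTAMP) reports the timestamp to user space after
 * transmission, while one-step sync (ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
 * has the timestamp inserted into the outgoing Sync packet itself.
 */
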
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err, new_offloads = priv->active_offloads;
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
		break;
	case HWTSTAMP_TX_ON:
		new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
		new_offloads |= ENETC_F_TX_TSTAMP;
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
		new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		new_offloads &= ~ENETC_F_RX_TSTAMP;
		break;
	default:
		new_offloads |= ENETC_F_RX_TSTAMP;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	}

	if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
		bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP);

		err = enetc_reconfigure(priv, extended, NULL, NULL);
		if (err)
			return err;
	}

	priv->active_offloads = new_offloads;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;

	if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
		config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
	else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
		config.tx_type = HWTSTAMP_TX_ON;
	else
		config.tx_type = HWTSTAMP_TX_OFF;

	config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
			   HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
#endif

int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (cmd == SIOCSHWTSTAMP)
		return enetc_hwtstamp_set(ndev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return enetc_hwtstamp_get(ndev, rq);
#endif

	if (!priv->phylink)
		return -EOPNOTSUPP;

	return phylink_mii_ioctl(priv->phylink, rq, cmd);
}
EXPORT_SYMBOL_GPL(enetc_ioctl);

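/* MSI-X layout: entries below ENETC_BDR_INT_BASE_IDX are reserved (e.g.
 * for messaging), followed by one Rx/Tx vector per bdr_int_num. TX rings
 * are interleaved across vectors via idx = bdr_int_num * j + i; e.g. with
 * 2 vectors and 8 TX rings, vector 0 owns rings 0/2/4/6 and vector 1 owns
 * rings 1/3/5/7.
 */
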
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
	struct pci_dev *pdev = priv->si->pdev;
	int num_stack_tx_queues;
	int first_xdp_tx_ring;
	int i, n, err, nvec;
	int v_tx_rings;

	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
	/* allocate MSIX for both messaging and Rx/Tx interrupts */
	n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);

	if (n < 0)
		return n;

	if (n != nvec)
		return -EPERM;

	/* # of tx rings per int vector */
	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;

	for (i = 0; i < priv->bdr_int_num; i++) {
		struct enetc_int_vector *v;
		struct enetc_bdr *bdr;
		int j;

		v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
		if (!v) {
			err = -ENOMEM;
			goto fail;
		}

		priv->int_vector[i] = v;

		bdr = &v->rx_ring;
		bdr->index = i;
		bdr->ndev = priv->ndev;
		bdr->dev = priv->dev;
		bdr->bd_count = priv->rx_bd_count;
		bdr->buffer_offset = ENETC_RXB_PAD;
		priv->rx_ring[i] = bdr;

		err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
		if (err) {
			kfree(v);
			goto fail;
		}

		err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err) {
			xdp_rxq_info_unreg(&bdr->xdp.rxq);
			kfree(v);
			goto fail;
		}

		/* init defaults for adaptive IC */
		if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
			v->rx_ictt = 0x1;
			v->rx_dim_en = true;
		}
		INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
		netif_napi_add(priv->ndev, &v->napi, enetc_poll);
		v->count_tx_rings = v_tx_rings;

		for (j = 0; j < v_tx_rings; j++) {
			int idx;

			/* default tx ring mapping policy */
			idx = priv->bdr_int_num * j + i;
			__set_bit(idx, &v->tx_rings_map);
			bdr = &v->tx_ring[j];
			bdr->index = idx;
			bdr->ndev = priv->ndev;
			bdr->dev = priv->dev;
			bdr->bd_count = priv->tx_bd_count;
			priv->tx_ring[idx] = bdr;
		}
	}

	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);

	err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
	if (err)
		goto fail;

	err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings);
	if (err)
		goto fail;

	priv->min_num_stack_tx_queues = num_possible_cpus();
	first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
	priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];

	return 0;

fail:
	while (i--) {
		struct enetc_int_vector *v = priv->int_vector[i];
		struct enetc_bdr *rx_ring = &v->rx_ring;

		xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
		xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
		netif_napi_del(&v->napi);
		cancel_work_sync(&v->rx_dim.work);
		kfree(v);
	}

	pci_free_irq_vectors(pdev);

	return err;
}
EXPORT_SYMBOL_GPL(enetc_alloc_msix);

void enetc_free_msix(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->bdr_int_num; i++) {
		struct enetc_int_vector *v = priv->int_vector[i];
		struct enetc_bdr *rx_ring = &v->rx_ring;

		xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
		xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
		netif_napi_del(&v->napi);
		cancel_work_sync(&v->rx_dim.work);
	}

	for (i = 0; i < priv->num_rx_rings; i++)
		priv->rx_ring[i] = NULL;

	for (i = 0; i < priv->num_tx_rings; i++)
		priv->tx_ring[i] = NULL;

	for (i = 0; i < priv->bdr_int_num; i++) {
		kfree(priv->int_vector[i]);
		priv->int_vector[i] = NULL;
	}

	/* disable all MSIX for this device */
	pci_free_irq_vectors(priv->si->pdev);
}
EXPORT_SYMBOL_GPL(enetc_free_msix);

static void enetc_kfree_si(struct enetc_si *si)
{
	char *p = (char *)si - si->pad;

	kfree(p);
}

static void enetc_detect_errata(struct enetc_si *si)
{
	if (si->pdev->revision == ENETC_REV1)
		si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
}

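/* enetc_pci_probe() below over-allocates by ENETC_SI_ALIGN - 1 bytes and
 * aligns the struct enetc_si pointer with PTR_ALIGN(); the resulting
 * offset is stored in si->pad so that enetc_kfree_si() above can recover
 * the original pointer returned by kzalloc().
 */
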
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
{
	struct enetc_si *si, *p;
	struct enetc_hw *hw;
	size_t alloc_size;
	int err, len;

	pcie_flr(pdev);
	err = pci_enable_device_mem(pdev);
	if (err)
		return dev_err_probe(&pdev->dev, err, "device enable failed\n");

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
		goto err_dma;
	}

	err = pci_request_mem_regions(pdev, name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
		goto err_pci_mem_reg;
	}

	pci_set_master(pdev);

	alloc_size = sizeof(struct enetc_si);
	if (sizeof_priv) {
		/* align priv to 32B */
		alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* force 32B alignment for enetc_si */
	alloc_size += ENETC_SI_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		err = -ENOMEM;
		goto err_alloc_si;
	}

	si = PTR_ALIGN(p, ENETC_SI_ALIGN);
	si->pad = (char *)si - (char *)p;

	pci_set_drvdata(pdev, si);
	si->pdev = pdev;
	hw = &si->hw;

	len = pci_resource_len(pdev, ENETC_BAR_REGS);
	hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
	if (!hw->reg) {
		err = -ENXIO;
		dev_err(&pdev->dev, "ioremap() failed\n");
		goto err_ioremap;
	}
	if (len > ENETC_PORT_BASE)
		hw->port = hw->reg + ENETC_PORT_BASE;
	if (len > ENETC_GLOBAL_BASE)
		hw->global = hw->reg + ENETC_GLOBAL_BASE;

	enetc_detect_errata(si);

	return 0;

err_ioremap:
	enetc_kfree_si(si);
err_alloc_si:
	pci_release_mem_regions(pdev);
err_pci_mem_reg:
err_dma:
	pci_disable_device(pdev);

	return err;
}
EXPORT_SYMBOL_GPL(enetc_pci_probe);

void enetc_pci_remove(struct pci_dev *pdev)
{
	struct enetc_si *si = pci_get_drvdata(pdev);
	struct enetc_hw *hw = &si->hw;

	iounmap(hw->reg);
	enetc_kfree_si(si);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
EXPORT_SYMBOL_GPL(enetc_pci_remove);

MODULE_LICENSE("Dual BSD/GPL");