// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/of_mdio.h>
#include <linux/vmalloc.h>

/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
/* max # of chained Tx BDs is 15, including head and extension BD */
#define ENETC_MAX_SKB_FRAGS	13
#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)

static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
			      int active_offloads);

netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_bdr *tx_ring;
	int count;

	tx_ring = priv->tx_ring[skb->queue_mapping];

	if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
		if (unlikely(skb_linearize(skb)))
			goto drop_packet_err;

	count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
		netif_stop_subqueue(ndev, tx_ring->index);
		return NETDEV_TX_BUSY;
	}

	count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
	if (unlikely(!count))
		goto drop_packet_err;

	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
		netif_stop_subqueue(ndev, tx_ring->index);

	return NETDEV_TX_OK;

drop_packet_err:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
{
	int l3_start, l3_hsize;
	u16 l3_flags, l4_flags;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		l4_flags = ENETC_TXBD_L4_TCP;
		break;
	case offsetof(struct udphdr, check):
		l4_flags = ENETC_TXBD_L4_UDP;
		break;
	default:
		skb_checksum_help(skb);
		return false;
	}

	l3_start = skb_network_offset(skb);
	l3_hsize = skb_network_header_len(skb);

	l3_flags = 0;
	if (skb->protocol == htons(ETH_P_IPV6))
		l3_flags = ENETC_TXBD_L3_IPV6;

	/* write BD fields */
	txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
	txbd->l4_csoff = l4_flags;

	return true;
}

static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
				struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->is_dma_page)
		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
			       tx_swbd->len, DMA_TO_DEVICE);
	else
		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
				 tx_swbd->len, DMA_TO_DEVICE);
	tx_swbd->dma = 0;
}

static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
			      struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->dma)
		enetc_unmap_tx_buff(tx_ring, tx_swbd);

	if (tx_swbd->skb) {
		dev_kfree_skb_any(tx_swbd->skb);
		tx_swbd->skb = NULL;
	}
}
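/* Descriptive note (added summary, not part of the original sources): this
 * is how enetc_map_tx_buffs() below consumes BDs, and how that lines up
 * with the ENETC_TXBDS_NEEDED() budget checked in enetc_xmit():
 *
 *   - one BD for the linear (head) part of the skb;
 *   - one optional extension BD when VLAN insertion and/or a Tx timestamp
 *     is requested (ENETC_TXBD_FLAGS_EX);
 *   - one BD per page fragment, the last BD written getting
 *     ENETC_TXBD_FLAGS_F.
 *
 * Worked example: an skb with two frags and a VLAN tag uses
 * 1 + 1 + 2 = 4 BDs, while enetc_xmit() reserves
 * ENETC_TXBDS_NEEDED(nr_frags + 1) = 3 + 2 = 5 free BDs, covering the
 * optional extension BD plus the 1 BD gap mentioned above.
 */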
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
			      int active_offloads)
{
	struct enetc_tx_swbd *tx_swbd;
	skb_frag_t *frag;
	int len = skb_headlen(skb);
	union enetc_tx_bd temp_bd;
	union enetc_tx_bd *txbd;
	bool do_vlan, do_tstamp;
	int i, count = 0;
	unsigned int f;
	dma_addr_t dma;
	u8 flags = 0;

	i = tx_ring->next_to_use;
	txbd = ENETC_TXBD(*tx_ring, i);
	prefetchw(txbd);

	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
		goto dma_err;

	temp_bd.addr = cpu_to_le64(dma);
	temp_bd.buf_len = cpu_to_le16(len);
	temp_bd.lstatus = 0;

	tx_swbd = &tx_ring->tx_swbd[i];
	tx_swbd->dma = dma;
	tx_swbd->len = len;
	tx_swbd->is_dma_page = 0;
	count++;

	do_vlan = skb_vlan_tag_present(skb);
	do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
		    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
	tx_swbd->do_tstamp = do_tstamp;
	tx_swbd->check_wb = tx_swbd->do_tstamp;

	if (do_vlan || do_tstamp)
		flags |= ENETC_TXBD_FLAGS_EX;

	if (enetc_tx_csum(skb, &temp_bd))
		flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
	else if (tx_ring->tsd_enable)
		flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;

	/* first BD needs frm_len and offload flags set */
	temp_bd.frm_len = cpu_to_le16(skb->len);
	temp_bd.flags = flags;

	if (flags & ENETC_TXBD_FLAGS_TSE) {
		u32 temp;

		temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK)
			| (flags << ENETC_TXBD_FLAGS_OFFSET);
		temp_bd.txstart = cpu_to_le32(temp);
	}

	if (flags & ENETC_TXBD_FLAGS_EX) {
		u8 e_flags = 0;
		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		/* add extension BD for VLAN and/or timestamping */
		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}
		prefetchw(txbd);

		if (do_vlan) {
			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
			temp_bd.ext.tpid = 0; /* < C-TAG */
			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
		}

		if (do_tstamp) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
		}

		temp_bd.ext.e_flags = e_flags;
		count++;
	}

	frag = &skb_shinfo(skb)->frags[0];
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_err;

		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}
		prefetchw(txbd);

		temp_bd.addr = cpu_to_le64(dma);
		temp_bd.buf_len = cpu_to_le16(len);

		tx_swbd->dma = dma;
		tx_swbd->len = len;
		tx_swbd->is_dma_page = 1;
		count++;
	}

	/* last BD needs 'F' bit set */
	flags |= ENETC_TXBD_FLAGS_F;
	temp_bd.flags = flags;
	*txbd = temp_bd;

	tx_ring->tx_swbd[i].skb = skb;

	enetc_bdr_idx_inc(tx_ring, &i);
	tx_ring->next_to_use = i;

	skb_tx_timestamp(skb);

	/* let H/W know BD ring has been updated */
	enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */

	return count;

dma_err:
	dev_err(tx_ring->dev, "DMA map error\n");

	do {
		tx_swbd = &tx_ring->tx_swbd[i];
		enetc_free_tx_skb(tx_ring, tx_swbd);
		if (i == 0)
			i = tx_ring->bd_count;
		i--;
	} while (count--);

	return 0;
}

static irqreturn_t enetc_msix(int irq, void *data)
{
	struct enetc_int_vector *v = data;
	int i;

	/* disable interrupts */
	enetc_wr_reg(v->rbier, 0);

	for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);

	napi_schedule_irqoff(&v->napi);

	return IRQ_HANDLED;
}

static bool
enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget); 278 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, 279 struct napi_struct *napi, int work_limit); 280 281 static int enetc_poll(struct napi_struct *napi, int budget) 282 { 283 struct enetc_int_vector 284 *v = container_of(napi, struct enetc_int_vector, napi); 285 bool complete = true; 286 int work_done; 287 int i; 288 289 for (i = 0; i < v->count_tx_rings; i++) 290 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) 291 complete = false; 292 293 work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget); 294 if (work_done == budget) 295 complete = false; 296 297 if (!complete) 298 return budget; 299 300 napi_complete_done(napi, work_done); 301 302 /* enable interrupts */ 303 enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE); 304 305 for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) 306 enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 307 ENETC_TBIER_TXTIE); 308 309 return work_done; 310 } 311 312 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) 313 { 314 int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; 315 316 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; 317 } 318 319 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd, 320 u64 *tstamp) 321 { 322 u32 lo, hi, tstamp_lo; 323 324 lo = enetc_rd(hw, ENETC_SICTR0); 325 hi = enetc_rd(hw, ENETC_SICTR1); 326 tstamp_lo = le32_to_cpu(txbd->wb.tstamp); 327 if (lo <= tstamp_lo) 328 hi -= 1; 329 *tstamp = (u64)hi << 32 | tstamp_lo; 330 } 331 332 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp) 333 { 334 struct skb_shared_hwtstamps shhwtstamps; 335 336 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { 337 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 338 shhwtstamps.hwtstamp = ns_to_ktime(tstamp); 339 skb_tstamp_tx(skb, &shhwtstamps); 340 } 341 } 342 343 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) 344 { 345 struct net_device *ndev = tx_ring->ndev; 346 int tx_frm_cnt = 0, tx_byte_cnt = 0; 347 struct enetc_tx_swbd *tx_swbd; 348 int i, bds_to_clean; 349 bool do_tstamp; 350 u64 tstamp = 0; 351 352 i = tx_ring->next_to_clean; 353 tx_swbd = &tx_ring->tx_swbd[i]; 354 bds_to_clean = enetc_bd_ready_count(tx_ring, i); 355 356 do_tstamp = false; 357 358 while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) { 359 bool is_eof = !!tx_swbd->skb; 360 361 if (unlikely(tx_swbd->check_wb)) { 362 struct enetc_ndev_priv *priv = netdev_priv(ndev); 363 union enetc_tx_bd *txbd; 364 365 txbd = ENETC_TXBD(*tx_ring, i); 366 367 if (txbd->flags & ENETC_TXBD_FLAGS_W && 368 tx_swbd->do_tstamp) { 369 enetc_get_tx_tstamp(&priv->si->hw, txbd, 370 &tstamp); 371 do_tstamp = true; 372 } 373 } 374 375 if (likely(tx_swbd->dma)) 376 enetc_unmap_tx_buff(tx_ring, tx_swbd); 377 378 if (is_eof) { 379 if (unlikely(do_tstamp)) { 380 enetc_tstamp_tx(tx_swbd->skb, tstamp); 381 do_tstamp = false; 382 } 383 napi_consume_skb(tx_swbd->skb, napi_budget); 384 tx_swbd->skb = NULL; 385 } 386 387 tx_byte_cnt += tx_swbd->len; 388 389 bds_to_clean--; 390 tx_swbd++; 391 i++; 392 if (unlikely(i == tx_ring->bd_count)) { 393 i = 0; 394 tx_swbd = tx_ring->tx_swbd; 395 } 396 397 /* BD iteration loop end */ 398 if (is_eof) { 399 tx_frm_cnt++; 400 /* re-arm interrupt source */ 401 enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) | 402 BIT(16 + tx_ring->index)); 403 } 404 405 if (unlikely(!bds_to_clean)) 406 bds_to_clean = enetc_bd_ready_count(tx_ring, i); 407 } 408 409 tx_ring->next_to_clean = i; 410 tx_ring->stats.packets += 
tx_frm_cnt;
	tx_ring->stats.bytes += tx_byte_cnt;

	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
		netif_wake_subqueue(ndev, tx_ring->index);
	}

	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
}

static bool enetc_new_page(struct enetc_bdr *rx_ring,
			   struct enetc_rx_swbd *rx_swbd)
{
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
		__free_page(page);

		return false;
	}

	rx_swbd->dma = addr;
	rx_swbd->page = page;
	rx_swbd->page_offset = ENETC_RXB_PAD;

	return true;
}

static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_rx_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->rx_swbd[i];
	rxbd = enetc_rxbd(rx_ring, i);

	for (j = 0; j < buff_cnt; j++) {
		/* try reuse page */
		if (unlikely(!rx_swbd->page)) {
			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
				rx_ring->stats.rx_alloc_errs++;
				break;
			}
		}

		/* update RxBD */
		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
					   rx_swbd->page_offset);
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;

		rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = rx_ring->rx_swbd;
		}
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i; /* keep track for page reuse */
		rx_ring->next_to_use = i;
		/* update ENETC's consumer index */
		enetc_wr_reg(rx_ring->rcir, i);
	}

	return j;
}

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
				union enetc_rx_bd *rxbd,
				struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 lo, hi, tstamp_lo;
	u64 tstamp;

	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
		lo = enetc_rd(hw, ENETC_SICTR0);
		hi = enetc_rd(hw, ENETC_SICTR1);
		rxbd = enetc_rxbd_ext(rxbd);
		tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
		if (lo <= tstamp_lo)
			hi -= 1;

		tstamp = (u64)hi << 32 | tstamp_lo;
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
	}
}
#endif

static void enetc_get_offloads(struct enetc_bdr *rx_ring,
			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
#endif
	/* TODO: hashing */
	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);

		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	/* copy VLAN to skb, if one is extracted, for now we assume it's a
	 * standard TPID, but HW also supports custom values
	 */
	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(rxbd->r.vlan_opt));
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (priv->active_offloads &
ENETC_F_RX_TSTAMP) 538 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); 539 #endif 540 } 541 542 static void enetc_process_skb(struct enetc_bdr *rx_ring, 543 struct sk_buff *skb) 544 { 545 skb_record_rx_queue(skb, rx_ring->index); 546 skb->protocol = eth_type_trans(skb, rx_ring->ndev); 547 } 548 549 static bool enetc_page_reusable(struct page *page) 550 { 551 return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); 552 } 553 554 static void enetc_reuse_page(struct enetc_bdr *rx_ring, 555 struct enetc_rx_swbd *old) 556 { 557 struct enetc_rx_swbd *new; 558 559 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; 560 561 /* next buf that may reuse a page */ 562 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); 563 564 /* copy page reference */ 565 *new = *old; 566 } 567 568 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, 569 int i, u16 size) 570 { 571 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; 572 573 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, 574 rx_swbd->page_offset, 575 size, DMA_FROM_DEVICE); 576 return rx_swbd; 577 } 578 579 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, 580 struct enetc_rx_swbd *rx_swbd) 581 { 582 if (likely(enetc_page_reusable(rx_swbd->page))) { 583 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; 584 page_ref_inc(rx_swbd->page); 585 586 enetc_reuse_page(rx_ring, rx_swbd); 587 588 /* sync for use by the device */ 589 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, 590 rx_swbd->page_offset, 591 ENETC_RXB_DMA_SIZE, 592 DMA_FROM_DEVICE); 593 } else { 594 dma_unmap_page(rx_ring->dev, rx_swbd->dma, 595 PAGE_SIZE, DMA_FROM_DEVICE); 596 } 597 598 rx_swbd->page = NULL; 599 } 600 601 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, 602 int i, u16 size) 603 { 604 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 605 struct sk_buff *skb; 606 void *ba; 607 608 ba = page_address(rx_swbd->page) + rx_swbd->page_offset; 609 skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE); 610 if (unlikely(!skb)) { 611 rx_ring->stats.rx_alloc_errs++; 612 return NULL; 613 } 614 615 skb_reserve(skb, ENETC_RXB_PAD); 616 __skb_put(skb, size); 617 618 enetc_put_rx_buff(rx_ring, rx_swbd); 619 620 return skb; 621 } 622 623 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, 624 u16 size, struct sk_buff *skb) 625 { 626 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); 627 628 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, 629 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); 630 631 enetc_put_rx_buff(rx_ring, rx_swbd); 632 } 633 634 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */ 635 636 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, 637 struct napi_struct *napi, int work_limit) 638 { 639 int rx_frm_cnt = 0, rx_byte_cnt = 0; 640 int cleaned_cnt, i; 641 642 cleaned_cnt = enetc_bd_unused(rx_ring); 643 /* next descriptor to process */ 644 i = rx_ring->next_to_clean; 645 646 while (likely(rx_frm_cnt < work_limit)) { 647 union enetc_rx_bd *rxbd; 648 struct sk_buff *skb; 649 u32 bd_status; 650 u16 size; 651 652 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) { 653 int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt); 654 655 cleaned_cnt -= count; 656 } 657 658 rxbd = enetc_rxbd(rx_ring, i); 659 bd_status = le32_to_cpu(rxbd->r.lstatus); 660 if (!bd_status) 661 break; 662 663 enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index)); 664 dma_rmb(); /* for reading other rxbd fields */ 665 size = le16_to_cpu(rxbd->r.buf_len); 666 skb = 
enetc_map_rx_buff_to_skb(rx_ring, i, size); 667 if (!skb) 668 break; 669 670 enetc_get_offloads(rx_ring, rxbd, skb); 671 672 cleaned_cnt++; 673 674 rxbd = enetc_rxbd_next(rx_ring, rxbd, i); 675 if (unlikely(++i == rx_ring->bd_count)) 676 i = 0; 677 678 if (unlikely(bd_status & 679 ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) { 680 dev_kfree_skb(skb); 681 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { 682 dma_rmb(); 683 bd_status = le32_to_cpu(rxbd->r.lstatus); 684 685 rxbd = enetc_rxbd_next(rx_ring, rxbd, i); 686 if (unlikely(++i == rx_ring->bd_count)) 687 i = 0; 688 } 689 690 rx_ring->ndev->stats.rx_dropped++; 691 rx_ring->ndev->stats.rx_errors++; 692 693 break; 694 } 695 696 /* not last BD in frame? */ 697 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { 698 bd_status = le32_to_cpu(rxbd->r.lstatus); 699 size = ENETC_RXB_DMA_SIZE; 700 701 if (bd_status & ENETC_RXBD_LSTATUS_F) { 702 dma_rmb(); 703 size = le16_to_cpu(rxbd->r.buf_len); 704 } 705 706 enetc_add_rx_buff_to_skb(rx_ring, i, size, skb); 707 708 cleaned_cnt++; 709 710 rxbd = enetc_rxbd_next(rx_ring, rxbd, i); 711 if (unlikely(++i == rx_ring->bd_count)) 712 i = 0; 713 } 714 715 rx_byte_cnt += skb->len; 716 717 enetc_process_skb(rx_ring, skb); 718 719 napi_gro_receive(napi, skb); 720 721 rx_frm_cnt++; 722 } 723 724 rx_ring->next_to_clean = i; 725 726 rx_ring->stats.packets += rx_frm_cnt; 727 rx_ring->stats.bytes += rx_byte_cnt; 728 729 return rx_frm_cnt; 730 } 731 732 /* Probing and Init */ 733 #define ENETC_MAX_RFS_SIZE 64 734 void enetc_get_si_caps(struct enetc_si *si) 735 { 736 struct enetc_hw *hw = &si->hw; 737 u32 val; 738 739 /* find out how many of various resources we have to work with */ 740 val = enetc_rd(hw, ENETC_SICAPR0); 741 si->num_rx_rings = (val >> 16) & 0xff; 742 si->num_tx_rings = val & 0xff; 743 744 val = enetc_rd(hw, ENETC_SIRFSCAPR); 745 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val); 746 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE); 747 748 si->num_rss = 0; 749 val = enetc_rd(hw, ENETC_SIPCAPR0); 750 if (val & ENETC_SIPCAPR0_RSS) { 751 u32 rss; 752 753 rss = enetc_rd(hw, ENETC_SIRSSCAPR); 754 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss); 755 } 756 757 if (val & ENETC_SIPCAPR0_QBV) 758 si->hw_features |= ENETC_SI_F_QBV; 759 760 if (val & ENETC_SIPCAPR0_PSFP) 761 si->hw_features |= ENETC_SI_F_PSFP; 762 } 763 764 static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size) 765 { 766 r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size, 767 &r->bd_dma_base, GFP_KERNEL); 768 if (!r->bd_base) 769 return -ENOMEM; 770 771 /* h/w requires 128B alignment */ 772 if (!IS_ALIGNED(r->bd_dma_base, 128)) { 773 dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base, 774 r->bd_dma_base); 775 return -EINVAL; 776 } 777 778 return 0; 779 } 780 781 static int enetc_alloc_txbdr(struct enetc_bdr *txr) 782 { 783 int err; 784 785 txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd)); 786 if (!txr->tx_swbd) 787 return -ENOMEM; 788 789 err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd)); 790 if (err) { 791 vfree(txr->tx_swbd); 792 return err; 793 } 794 795 txr->next_to_clean = 0; 796 txr->next_to_use = 0; 797 798 return 0; 799 } 800 801 static void enetc_free_txbdr(struct enetc_bdr *txr) 802 { 803 int size, i; 804 805 for (i = 0; i < txr->bd_count; i++) 806 enetc_free_tx_skb(txr, &txr->tx_swbd[i]); 807 808 size = txr->bd_count * sizeof(union enetc_tx_bd); 809 810 dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base); 811 txr->bd_base = NULL; 812 813 
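	/* tx_swbd was allocated with vzalloc() in enetc_alloc_txbdr(),
	 * so release it with vfree()
	 */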
vfree(txr->tx_swbd); 814 txr->tx_swbd = NULL; 815 } 816 817 static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) 818 { 819 int i, err; 820 821 for (i = 0; i < priv->num_tx_rings; i++) { 822 err = enetc_alloc_txbdr(priv->tx_ring[i]); 823 824 if (err) 825 goto fail; 826 } 827 828 return 0; 829 830 fail: 831 while (i-- > 0) 832 enetc_free_txbdr(priv->tx_ring[i]); 833 834 return err; 835 } 836 837 static void enetc_free_tx_resources(struct enetc_ndev_priv *priv) 838 { 839 int i; 840 841 for (i = 0; i < priv->num_tx_rings; i++) 842 enetc_free_txbdr(priv->tx_ring[i]); 843 } 844 845 static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended) 846 { 847 size_t size = sizeof(union enetc_rx_bd); 848 int err; 849 850 rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd)); 851 if (!rxr->rx_swbd) 852 return -ENOMEM; 853 854 if (extended) 855 size *= 2; 856 857 err = enetc_dma_alloc_bdr(rxr, size); 858 if (err) { 859 vfree(rxr->rx_swbd); 860 return err; 861 } 862 863 rxr->next_to_clean = 0; 864 rxr->next_to_use = 0; 865 rxr->next_to_alloc = 0; 866 rxr->ext_en = extended; 867 868 return 0; 869 } 870 871 static void enetc_free_rxbdr(struct enetc_bdr *rxr) 872 { 873 int size; 874 875 size = rxr->bd_count * sizeof(union enetc_rx_bd); 876 877 dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base); 878 rxr->bd_base = NULL; 879 880 vfree(rxr->rx_swbd); 881 rxr->rx_swbd = NULL; 882 } 883 884 static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv) 885 { 886 bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); 887 int i, err; 888 889 for (i = 0; i < priv->num_rx_rings; i++) { 890 err = enetc_alloc_rxbdr(priv->rx_ring[i], extended); 891 892 if (err) 893 goto fail; 894 } 895 896 return 0; 897 898 fail: 899 while (i-- > 0) 900 enetc_free_rxbdr(priv->rx_ring[i]); 901 902 return err; 903 } 904 905 static void enetc_free_rx_resources(struct enetc_ndev_priv *priv) 906 { 907 int i; 908 909 for (i = 0; i < priv->num_rx_rings; i++) 910 enetc_free_rxbdr(priv->rx_ring[i]); 911 } 912 913 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) 914 { 915 int i; 916 917 if (!tx_ring->tx_swbd) 918 return; 919 920 for (i = 0; i < tx_ring->bd_count; i++) { 921 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; 922 923 enetc_free_tx_skb(tx_ring, tx_swbd); 924 } 925 926 tx_ring->next_to_clean = 0; 927 tx_ring->next_to_use = 0; 928 } 929 930 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) 931 { 932 int i; 933 934 if (!rx_ring->rx_swbd) 935 return; 936 937 for (i = 0; i < rx_ring->bd_count; i++) { 938 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; 939 940 if (!rx_swbd->page) 941 continue; 942 943 dma_unmap_page(rx_ring->dev, rx_swbd->dma, 944 PAGE_SIZE, DMA_FROM_DEVICE); 945 __free_page(rx_swbd->page); 946 rx_swbd->page = NULL; 947 } 948 949 rx_ring->next_to_clean = 0; 950 rx_ring->next_to_use = 0; 951 rx_ring->next_to_alloc = 0; 952 } 953 954 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) 955 { 956 int i; 957 958 for (i = 0; i < priv->num_rx_rings; i++) 959 enetc_free_rx_ring(priv->rx_ring[i]); 960 961 for (i = 0; i < priv->num_tx_rings; i++) 962 enetc_free_tx_ring(priv->tx_ring[i]); 963 } 964 965 static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) 966 { 967 int size = cbdr->bd_count * sizeof(struct enetc_cbd); 968 969 cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base, 970 GFP_KERNEL); 971 if (!cbdr->bd_base) 972 return -ENOMEM; 973 974 /* h/w requires 128B alignment */ 975 if 
(!IS_ALIGNED(cbdr->bd_dma_base, 128)) { 976 dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); 977 return -EINVAL; 978 } 979 980 cbdr->next_to_clean = 0; 981 cbdr->next_to_use = 0; 982 983 return 0; 984 } 985 986 static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) 987 { 988 int size = cbdr->bd_count * sizeof(struct enetc_cbd); 989 990 dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); 991 cbdr->bd_base = NULL; 992 } 993 994 static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) 995 { 996 /* set CBDR cache attributes */ 997 enetc_wr(hw, ENETC_SICAR2, 998 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); 999 1000 enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base)); 1001 enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base)); 1002 enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count)); 1003 1004 enetc_wr(hw, ENETC_SICBDRPIR, 0); 1005 enetc_wr(hw, ENETC_SICBDRCIR, 0); 1006 1007 /* enable ring */ 1008 enetc_wr(hw, ENETC_SICBDRMR, BIT(31)); 1009 1010 cbdr->pir = hw->reg + ENETC_SICBDRPIR; 1011 cbdr->cir = hw->reg + ENETC_SICBDRCIR; 1012 } 1013 1014 static void enetc_clear_cbdr(struct enetc_hw *hw) 1015 { 1016 enetc_wr(hw, ENETC_SICBDRMR, 0); 1017 } 1018 1019 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) 1020 { 1021 int *rss_table; 1022 int i; 1023 1024 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL); 1025 if (!rss_table) 1026 return -ENOMEM; 1027 1028 /* Set up RSS table defaults */ 1029 for (i = 0; i < si->num_rss; i++) 1030 rss_table[i] = i % num_groups; 1031 1032 enetc_set_rss_table(si, rss_table, si->num_rss); 1033 1034 kfree(rss_table); 1035 1036 return 0; 1037 } 1038 1039 static int enetc_configure_si(struct enetc_ndev_priv *priv) 1040 { 1041 struct enetc_si *si = priv->si; 1042 struct enetc_hw *hw = &si->hw; 1043 int err; 1044 1045 enetc_setup_cbdr(hw, &si->cbd_ring); 1046 /* set SI cache attributes */ 1047 enetc_wr(hw, ENETC_SICAR0, 1048 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); 1049 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); 1050 /* enable SI */ 1051 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); 1052 1053 if (si->num_rss) { 1054 err = enetc_setup_default_rss_table(si, priv->num_rx_rings); 1055 if (err) 1056 return err; 1057 } 1058 1059 return 0; 1060 } 1061 1062 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) 1063 { 1064 struct enetc_si *si = priv->si; 1065 int cpus = num_online_cpus(); 1066 1067 priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE; 1068 priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE; 1069 1070 /* Enable all available TX rings in order to configure as many 1071 * priorities as possible, when needed. 
1072 * TODO: Make # of TX rings run-time configurable 1073 */ 1074 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); 1075 priv->num_tx_rings = si->num_tx_rings; 1076 priv->bdr_int_num = cpus; 1077 1078 /* SI specific */ 1079 si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; 1080 } 1081 1082 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) 1083 { 1084 struct enetc_si *si = priv->si; 1085 int err; 1086 1087 err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring); 1088 if (err) 1089 return err; 1090 1091 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), 1092 GFP_KERNEL); 1093 if (!priv->cls_rules) { 1094 err = -ENOMEM; 1095 goto err_alloc_cls; 1096 } 1097 1098 err = enetc_configure_si(priv); 1099 if (err) 1100 goto err_config_si; 1101 1102 return 0; 1103 1104 err_config_si: 1105 kfree(priv->cls_rules); 1106 err_alloc_cls: 1107 enetc_clear_cbdr(&si->hw); 1108 enetc_free_cbdr(priv->dev, &si->cbd_ring); 1109 1110 return err; 1111 } 1112 1113 void enetc_free_si_resources(struct enetc_ndev_priv *priv) 1114 { 1115 struct enetc_si *si = priv->si; 1116 1117 enetc_clear_cbdr(&si->hw); 1118 enetc_free_cbdr(priv->dev, &si->cbd_ring); 1119 1120 kfree(priv->cls_rules); 1121 } 1122 1123 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 1124 { 1125 int idx = tx_ring->index; 1126 u32 tbmr; 1127 1128 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0, 1129 lower_32_bits(tx_ring->bd_dma_base)); 1130 1131 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1, 1132 upper_32_bits(tx_ring->bd_dma_base)); 1133 1134 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ 1135 enetc_txbdr_wr(hw, idx, ENETC_TBLENR, 1136 ENETC_RTBLENR_LEN(tx_ring->bd_count)); 1137 1138 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */ 1139 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); 1140 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); 1141 1142 /* enable Tx ints by setting pkt thr to 1 */ 1143 enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1); 1144 1145 tbmr = ENETC_TBMR_EN; 1146 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) 1147 tbmr |= ENETC_TBMR_VIH; 1148 1149 /* enable ring */ 1150 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); 1151 1152 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); 1153 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); 1154 tx_ring->idr = hw->reg + ENETC_SITXIDR; 1155 } 1156 1157 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 1158 { 1159 int idx = rx_ring->index; 1160 u32 rbmr; 1161 1162 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0, 1163 lower_32_bits(rx_ring->bd_dma_base)); 1164 1165 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1, 1166 upper_32_bits(rx_ring->bd_dma_base)); 1167 1168 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ 1169 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR, 1170 ENETC_RTBLENR_LEN(rx_ring->bd_count)); 1171 1172 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); 1173 1174 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); 1175 1176 /* enable Rx ints by setting pkt thr to 1 */ 1177 enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1); 1178 1179 rbmr = ENETC_RBMR_EN; 1180 1181 if (rx_ring->ext_en) 1182 rbmr |= ENETC_RBMR_BDS; 1183 1184 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) 1185 rbmr |= ENETC_RBMR_VTE; 1186 1187 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); 1188 rx_ring->idr = hw->reg + ENETC_SIRXIDR; 1189 1190 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); 1191 1192 /* enable ring */ 1193 
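	/* rbmr was assembled above: ENETC_RBMR_EN, plus ENETC_RBMR_BDS when
	 * extended Rx descriptors are in use (rx_ring->ext_en) and
	 * ENETC_RBMR_VTE when NETIF_F_HW_VLAN_CTAG_RX is set; the ring has
	 * already been seeded by enetc_refill_rx_ring() above.
	 */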
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); 1194 } 1195 1196 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv) 1197 { 1198 int i; 1199 1200 for (i = 0; i < priv->num_tx_rings; i++) 1201 enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]); 1202 1203 for (i = 0; i < priv->num_rx_rings; i++) 1204 enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]); 1205 } 1206 1207 static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 1208 { 1209 int idx = rx_ring->index; 1210 1211 /* disable EN bit on ring */ 1212 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0); 1213 } 1214 1215 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 1216 { 1217 int delay = 8, timeout = 100; 1218 int idx = tx_ring->index; 1219 1220 /* disable EN bit on ring */ 1221 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); 1222 1223 /* wait for busy to clear */ 1224 while (delay < timeout && 1225 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) { 1226 msleep(delay); 1227 delay *= 2; 1228 } 1229 1230 if (delay >= timeout) 1231 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", 1232 idx); 1233 } 1234 1235 static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) 1236 { 1237 int i; 1238 1239 for (i = 0; i < priv->num_tx_rings; i++) 1240 enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]); 1241 1242 for (i = 0; i < priv->num_rx_rings; i++) 1243 enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]); 1244 1245 udelay(1); 1246 } 1247 1248 static int enetc_setup_irqs(struct enetc_ndev_priv *priv) 1249 { 1250 struct pci_dev *pdev = priv->si->pdev; 1251 cpumask_t cpu_mask; 1252 int i, j, err; 1253 1254 for (i = 0; i < priv->bdr_int_num; i++) { 1255 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 1256 struct enetc_int_vector *v = priv->int_vector[i]; 1257 int entry = ENETC_BDR_INT_BASE_IDX + i; 1258 struct enetc_hw *hw = &priv->si->hw; 1259 1260 snprintf(v->name, sizeof(v->name), "%s-rxtx%d", 1261 priv->ndev->name, i); 1262 err = request_irq(irq, enetc_msix, 0, v->name, v); 1263 if (err) { 1264 dev_err(priv->dev, "request_irq() failed!\n"); 1265 goto irq_err; 1266 } 1267 1268 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); 1269 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); 1270 1271 enetc_wr(hw, ENETC_SIMSIRRV(i), entry); 1272 1273 for (j = 0; j < v->count_tx_rings; j++) { 1274 int idx = v->tx_ring[j].index; 1275 1276 enetc_wr(hw, ENETC_SIMSITRV(idx), entry); 1277 } 1278 cpumask_clear(&cpu_mask); 1279 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 1280 irq_set_affinity_hint(irq, &cpu_mask); 1281 } 1282 1283 return 0; 1284 1285 irq_err: 1286 while (i--) { 1287 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 1288 1289 irq_set_affinity_hint(irq, NULL); 1290 free_irq(irq, priv->int_vector[i]); 1291 } 1292 1293 return err; 1294 } 1295 1296 static void enetc_free_irqs(struct enetc_ndev_priv *priv) 1297 { 1298 struct pci_dev *pdev = priv->si->pdev; 1299 int i; 1300 1301 for (i = 0; i < priv->bdr_int_num; i++) { 1302 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); 1303 1304 irq_set_affinity_hint(irq, NULL); 1305 free_irq(irq, priv->int_vector[i]); 1306 } 1307 } 1308 1309 static void enetc_enable_interrupts(struct enetc_ndev_priv *priv) 1310 { 1311 int i; 1312 1313 /* enable Tx & Rx event indication */ 1314 for (i = 0; i < priv->num_rx_rings; i++) { 1315 enetc_rxbdr_wr(&priv->si->hw, i, 1316 ENETC_RBIER, ENETC_RBIER_RXTIE); 1317 } 1318 1319 for (i = 0; i < priv->num_tx_rings; i++) { 1320 enetc_txbdr_wr(&priv->si->hw, i, 1321 ENETC_TBIER, 
ENETC_TBIER_TXTIE); 1322 } 1323 } 1324 1325 static void enetc_disable_interrupts(struct enetc_ndev_priv *priv) 1326 { 1327 int i; 1328 1329 for (i = 0; i < priv->num_tx_rings; i++) 1330 enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0); 1331 1332 for (i = 0; i < priv->num_rx_rings; i++) 1333 enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0); 1334 } 1335 1336 static void adjust_link(struct net_device *ndev) 1337 { 1338 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1339 struct phy_device *phydev = ndev->phydev; 1340 1341 if (priv->active_offloads & ENETC_F_QBV) 1342 enetc_sched_speed_set(ndev); 1343 1344 phy_print_status(phydev); 1345 } 1346 1347 static int enetc_phy_connect(struct net_device *ndev) 1348 { 1349 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1350 struct phy_device *phydev; 1351 struct ethtool_eee edata; 1352 1353 if (!priv->phy_node) 1354 return 0; /* phy-less mode */ 1355 1356 phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link, 1357 0, priv->if_mode); 1358 if (!phydev) { 1359 dev_err(&ndev->dev, "could not attach to PHY\n"); 1360 return -ENODEV; 1361 } 1362 1363 phy_attached_info(phydev); 1364 1365 /* disable EEE autoneg, until ENETC driver supports it */ 1366 memset(&edata, 0, sizeof(struct ethtool_eee)); 1367 phy_ethtool_set_eee(phydev, &edata); 1368 1369 return 0; 1370 } 1371 1372 int enetc_open(struct net_device *ndev) 1373 { 1374 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1375 int i, err; 1376 1377 err = enetc_setup_irqs(priv); 1378 if (err) 1379 return err; 1380 1381 err = enetc_phy_connect(ndev); 1382 if (err) 1383 goto err_phy_connect; 1384 1385 err = enetc_alloc_tx_resources(priv); 1386 if (err) 1387 goto err_alloc_tx; 1388 1389 err = enetc_alloc_rx_resources(priv); 1390 if (err) 1391 goto err_alloc_rx; 1392 1393 enetc_setup_bdrs(priv); 1394 1395 err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings); 1396 if (err) 1397 goto err_set_queues; 1398 1399 err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings); 1400 if (err) 1401 goto err_set_queues; 1402 1403 for (i = 0; i < priv->bdr_int_num; i++) 1404 napi_enable(&priv->int_vector[i]->napi); 1405 1406 enetc_enable_interrupts(priv); 1407 1408 if (ndev->phydev) 1409 phy_start(ndev->phydev); 1410 else 1411 netif_carrier_on(ndev); 1412 1413 netif_tx_start_all_queues(ndev); 1414 1415 return 0; 1416 1417 err_set_queues: 1418 enetc_free_rx_resources(priv); 1419 err_alloc_rx: 1420 enetc_free_tx_resources(priv); 1421 err_alloc_tx: 1422 if (ndev->phydev) 1423 phy_disconnect(ndev->phydev); 1424 err_phy_connect: 1425 enetc_free_irqs(priv); 1426 1427 return err; 1428 } 1429 1430 int enetc_close(struct net_device *ndev) 1431 { 1432 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1433 int i; 1434 1435 netif_tx_stop_all_queues(ndev); 1436 1437 if (ndev->phydev) { 1438 phy_stop(ndev->phydev); 1439 phy_disconnect(ndev->phydev); 1440 } else { 1441 netif_carrier_off(ndev); 1442 } 1443 1444 for (i = 0; i < priv->bdr_int_num; i++) { 1445 napi_synchronize(&priv->int_vector[i]->napi); 1446 napi_disable(&priv->int_vector[i]->napi); 1447 } 1448 1449 enetc_disable_interrupts(priv); 1450 enetc_clear_bdrs(priv); 1451 1452 enetc_free_rxtx_rings(priv); 1453 enetc_free_rx_resources(priv); 1454 enetc_free_tx_resources(priv); 1455 enetc_free_irqs(priv); 1456 1457 return 0; 1458 } 1459 1460 static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) 1461 { 1462 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1463 struct tc_mqprio_qopt *mqprio = type_data; 1464 struct enetc_bdr *tx_ring; 1465 u8 num_tc; 
1466 int i; 1467 1468 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 1469 num_tc = mqprio->num_tc; 1470 1471 if (!num_tc) { 1472 netdev_reset_tc(ndev); 1473 netif_set_real_num_tx_queues(ndev, priv->num_tx_rings); 1474 1475 /* Reset all ring priorities to 0 */ 1476 for (i = 0; i < priv->num_tx_rings; i++) { 1477 tx_ring = priv->tx_ring[i]; 1478 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0); 1479 } 1480 1481 return 0; 1482 } 1483 1484 /* Check if we have enough BD rings available to accommodate all TCs */ 1485 if (num_tc > priv->num_tx_rings) { 1486 netdev_err(ndev, "Max %d traffic classes supported\n", 1487 priv->num_tx_rings); 1488 return -EINVAL; 1489 } 1490 1491 /* For the moment, we use only one BD ring per TC. 1492 * 1493 * Configure num_tc BD rings with increasing priorities. 1494 */ 1495 for (i = 0; i < num_tc; i++) { 1496 tx_ring = priv->tx_ring[i]; 1497 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i); 1498 } 1499 1500 /* Reset the number of netdev queues based on the TC count */ 1501 netif_set_real_num_tx_queues(ndev, num_tc); 1502 1503 netdev_set_num_tc(ndev, num_tc); 1504 1505 /* Each TC is associated with one netdev queue */ 1506 for (i = 0; i < num_tc; i++) 1507 netdev_set_tc_queue(ndev, i, 1, i); 1508 1509 return 0; 1510 } 1511 1512 int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, 1513 void *type_data) 1514 { 1515 switch (type) { 1516 case TC_SETUP_QDISC_MQPRIO: 1517 return enetc_setup_tc_mqprio(ndev, type_data); 1518 case TC_SETUP_QDISC_TAPRIO: 1519 return enetc_setup_tc_taprio(ndev, type_data); 1520 case TC_SETUP_QDISC_CBS: 1521 return enetc_setup_tc_cbs(ndev, type_data); 1522 case TC_SETUP_QDISC_ETF: 1523 return enetc_setup_tc_txtime(ndev, type_data); 1524 case TC_SETUP_BLOCK: 1525 return enetc_setup_tc_psfp(ndev, type_data); 1526 default: 1527 return -EOPNOTSUPP; 1528 } 1529 } 1530 1531 struct net_device_stats *enetc_get_stats(struct net_device *ndev) 1532 { 1533 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1534 struct net_device_stats *stats = &ndev->stats; 1535 unsigned long packets = 0, bytes = 0; 1536 int i; 1537 1538 for (i = 0; i < priv->num_rx_rings; i++) { 1539 packets += priv->rx_ring[i]->stats.packets; 1540 bytes += priv->rx_ring[i]->stats.bytes; 1541 } 1542 1543 stats->rx_packets = packets; 1544 stats->rx_bytes = bytes; 1545 bytes = 0; 1546 packets = 0; 1547 1548 for (i = 0; i < priv->num_tx_rings; i++) { 1549 packets += priv->tx_ring[i]->stats.packets; 1550 bytes += priv->tx_ring[i]->stats.bytes; 1551 } 1552 1553 stats->tx_packets = packets; 1554 stats->tx_bytes = bytes; 1555 1556 return stats; 1557 } 1558 1559 static int enetc_set_rss(struct net_device *ndev, int en) 1560 { 1561 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1562 struct enetc_hw *hw = &priv->si->hw; 1563 u32 reg; 1564 1565 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); 1566 1567 reg = enetc_rd(hw, ENETC_SIMR); 1568 reg &= ~ENETC_SIMR_RSSE; 1569 reg |= (en) ? 
ENETC_SIMR_RSSE : 0; 1570 enetc_wr(hw, ENETC_SIMR, reg); 1571 1572 return 0; 1573 } 1574 1575 static int enetc_set_psfp(struct net_device *ndev, int en) 1576 { 1577 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1578 int err; 1579 1580 if (en) { 1581 err = enetc_psfp_enable(priv); 1582 if (err) 1583 return err; 1584 1585 priv->active_offloads |= ENETC_F_QCI; 1586 return 0; 1587 } 1588 1589 err = enetc_psfp_disable(priv); 1590 if (err) 1591 return err; 1592 1593 priv->active_offloads &= ~ENETC_F_QCI; 1594 1595 return 0; 1596 } 1597 1598 int enetc_set_features(struct net_device *ndev, 1599 netdev_features_t features) 1600 { 1601 netdev_features_t changed = ndev->features ^ features; 1602 int err = 0; 1603 1604 if (changed & NETIF_F_RXHASH) 1605 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); 1606 1607 if (changed & NETIF_F_HW_TC) 1608 err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); 1609 1610 return err; 1611 } 1612 1613 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK 1614 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) 1615 { 1616 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1617 struct hwtstamp_config config; 1618 int ao; 1619 1620 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 1621 return -EFAULT; 1622 1623 switch (config.tx_type) { 1624 case HWTSTAMP_TX_OFF: 1625 priv->active_offloads &= ~ENETC_F_TX_TSTAMP; 1626 break; 1627 case HWTSTAMP_TX_ON: 1628 priv->active_offloads |= ENETC_F_TX_TSTAMP; 1629 break; 1630 default: 1631 return -ERANGE; 1632 } 1633 1634 ao = priv->active_offloads; 1635 switch (config.rx_filter) { 1636 case HWTSTAMP_FILTER_NONE: 1637 priv->active_offloads &= ~ENETC_F_RX_TSTAMP; 1638 break; 1639 default: 1640 priv->active_offloads |= ENETC_F_RX_TSTAMP; 1641 config.rx_filter = HWTSTAMP_FILTER_ALL; 1642 } 1643 1644 if (netif_running(ndev) && ao != priv->active_offloads) { 1645 enetc_close(ndev); 1646 enetc_open(ndev); 1647 } 1648 1649 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 1650 -EFAULT : 0; 1651 } 1652 1653 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr) 1654 { 1655 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1656 struct hwtstamp_config config; 1657 1658 config.flags = 0; 1659 1660 if (priv->active_offloads & ENETC_F_TX_TSTAMP) 1661 config.tx_type = HWTSTAMP_TX_ON; 1662 else 1663 config.tx_type = HWTSTAMP_TX_OFF; 1664 1665 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? 1666 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; 1667 1668 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
1669 -EFAULT : 0; 1670 } 1671 #endif 1672 1673 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 1674 { 1675 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK 1676 if (cmd == SIOCSHWTSTAMP) 1677 return enetc_hwtstamp_set(ndev, rq); 1678 if (cmd == SIOCGHWTSTAMP) 1679 return enetc_hwtstamp_get(ndev, rq); 1680 #endif 1681 1682 if (!ndev->phydev) 1683 return -EOPNOTSUPP; 1684 return phy_mii_ioctl(ndev->phydev, rq, cmd); 1685 } 1686 1687 int enetc_alloc_msix(struct enetc_ndev_priv *priv) 1688 { 1689 struct pci_dev *pdev = priv->si->pdev; 1690 int size, v_tx_rings; 1691 int i, n, err, nvec; 1692 1693 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; 1694 /* allocate MSIX for both messaging and Rx/Tx interrupts */ 1695 n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); 1696 1697 if (n < 0) 1698 return n; 1699 1700 if (n != nvec) 1701 return -EPERM; 1702 1703 /* # of tx rings per int vector */ 1704 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; 1705 size = sizeof(struct enetc_int_vector) + 1706 sizeof(struct enetc_bdr) * v_tx_rings; 1707 1708 for (i = 0; i < priv->bdr_int_num; i++) { 1709 struct enetc_int_vector *v; 1710 struct enetc_bdr *bdr; 1711 int j; 1712 1713 v = kzalloc(size, GFP_KERNEL); 1714 if (!v) { 1715 err = -ENOMEM; 1716 goto fail; 1717 } 1718 1719 priv->int_vector[i] = v; 1720 1721 netif_napi_add(priv->ndev, &v->napi, enetc_poll, 1722 NAPI_POLL_WEIGHT); 1723 v->count_tx_rings = v_tx_rings; 1724 1725 for (j = 0; j < v_tx_rings; j++) { 1726 int idx; 1727 1728 /* default tx ring mapping policy */ 1729 if (priv->bdr_int_num == ENETC_MAX_BDR_INT) 1730 idx = 2 * j + i; /* 2 CPUs */ 1731 else 1732 idx = j + i * v_tx_rings; /* default */ 1733 1734 __set_bit(idx, &v->tx_rings_map); 1735 bdr = &v->tx_ring[j]; 1736 bdr->index = idx; 1737 bdr->ndev = priv->ndev; 1738 bdr->dev = priv->dev; 1739 bdr->bd_count = priv->tx_bd_count; 1740 priv->tx_ring[idx] = bdr; 1741 } 1742 1743 bdr = &v->rx_ring; 1744 bdr->index = i; 1745 bdr->ndev = priv->ndev; 1746 bdr->dev = priv->dev; 1747 bdr->bd_count = priv->rx_bd_count; 1748 priv->rx_ring[i] = bdr; 1749 } 1750 1751 return 0; 1752 1753 fail: 1754 while (i--) { 1755 netif_napi_del(&priv->int_vector[i]->napi); 1756 kfree(priv->int_vector[i]); 1757 } 1758 1759 pci_free_irq_vectors(pdev); 1760 1761 return err; 1762 } 1763 1764 void enetc_free_msix(struct enetc_ndev_priv *priv) 1765 { 1766 int i; 1767 1768 for (i = 0; i < priv->bdr_int_num; i++) { 1769 struct enetc_int_vector *v = priv->int_vector[i]; 1770 1771 netif_napi_del(&v->napi); 1772 } 1773 1774 for (i = 0; i < priv->num_rx_rings; i++) 1775 priv->rx_ring[i] = NULL; 1776 1777 for (i = 0; i < priv->num_tx_rings; i++) 1778 priv->tx_ring[i] = NULL; 1779 1780 for (i = 0; i < priv->bdr_int_num; i++) { 1781 kfree(priv->int_vector[i]); 1782 priv->int_vector[i] = NULL; 1783 } 1784 1785 /* disable all MSIX for this device */ 1786 pci_free_irq_vectors(priv->si->pdev); 1787 } 1788 1789 static void enetc_kfree_si(struct enetc_si *si) 1790 { 1791 char *p = (char *)si - si->pad; 1792 1793 kfree(p); 1794 } 1795 1796 static void enetc_detect_errata(struct enetc_si *si) 1797 { 1798 if (si->pdev->revision == ENETC_REV1) 1799 si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL | 1800 ENETC_ERR_UCMCSWP; 1801 } 1802 1803 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) 1804 { 1805 struct enetc_si *si, *p; 1806 struct enetc_hw *hw; 1807 size_t alloc_size; 1808 int err, len; 1809 1810 pcie_flr(pdev); 1811 err = pci_enable_device_mem(pdev); 1812 if (err) { 1813 dev_err(&pdev->dev, 
"device enable failed\n"); 1814 return err; 1815 } 1816 1817 /* set up for high or low dma */ 1818 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 1819 if (err) { 1820 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 1821 if (err) { 1822 dev_err(&pdev->dev, 1823 "DMA configuration failed: 0x%x\n", err); 1824 goto err_dma; 1825 } 1826 } 1827 1828 err = pci_request_mem_regions(pdev, name); 1829 if (err) { 1830 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); 1831 goto err_pci_mem_reg; 1832 } 1833 1834 pci_set_master(pdev); 1835 1836 alloc_size = sizeof(struct enetc_si); 1837 if (sizeof_priv) { 1838 /* align priv to 32B */ 1839 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN); 1840 alloc_size += sizeof_priv; 1841 } 1842 /* force 32B alignment for enetc_si */ 1843 alloc_size += ENETC_SI_ALIGN - 1; 1844 1845 p = kzalloc(alloc_size, GFP_KERNEL); 1846 if (!p) { 1847 err = -ENOMEM; 1848 goto err_alloc_si; 1849 } 1850 1851 si = PTR_ALIGN(p, ENETC_SI_ALIGN); 1852 si->pad = (char *)si - (char *)p; 1853 1854 pci_set_drvdata(pdev, si); 1855 si->pdev = pdev; 1856 hw = &si->hw; 1857 1858 len = pci_resource_len(pdev, ENETC_BAR_REGS); 1859 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); 1860 if (!hw->reg) { 1861 err = -ENXIO; 1862 dev_err(&pdev->dev, "ioremap() failed\n"); 1863 goto err_ioremap; 1864 } 1865 if (len > ENETC_PORT_BASE) 1866 hw->port = hw->reg + ENETC_PORT_BASE; 1867 if (len > ENETC_GLOBAL_BASE) 1868 hw->global = hw->reg + ENETC_GLOBAL_BASE; 1869 1870 enetc_detect_errata(si); 1871 1872 return 0; 1873 1874 err_ioremap: 1875 enetc_kfree_si(si); 1876 err_alloc_si: 1877 pci_release_mem_regions(pdev); 1878 err_pci_mem_reg: 1879 err_dma: 1880 pci_disable_device(pdev); 1881 1882 return err; 1883 } 1884 1885 void enetc_pci_remove(struct pci_dev *pdev) 1886 { 1887 struct enetc_si *si = pci_get_drvdata(pdev); 1888 struct enetc_hw *hw = &si->hw; 1889 1890 iounmap(hw->reg); 1891 enetc_kfree_si(si); 1892 pci_release_mem_regions(pdev); 1893 pci_disable_device(pdev); 1894 } 1895