// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
			 struct device *dev)
{
	struct page *page;
	int ret = -ENOMEM;
	dma_addr_t daddr;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = 0;

	return 0;

free_page:
	__free_pages(page, order);

err_exit:
	return ret;
}

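/* Decide whether the descriptor's current page can carry the next frame. The
 * ring holds one reference on every page it posts, so a page_ref_count() of
 * one means the stack has dropped all of its references and the page can be
 * rewound and reused in place (pg_reuses). While the stack still holds
 * references, the ring instead advances pg_off by one AQ_CFG_RX_FRAME_MAX
 * slice (pg_flips) until the page is exhausted and has to be released and
 * reallocated (pg_losts).
 */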
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
			  int order)
{
	int ret;

	if (rxbuf->rxdata.page) {
		/* One means ring is the only user and can reuse */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try reuse buffer */
			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
			    (PAGE_SIZE << order)) {
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.pg_flips++;
				u64_stats_update_end(&self->stats.rx.syncp);
			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and realloc
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.pg_losts++;
				u64_stats_update_end(&self->stats.rx.syncp);
			}
		} else {
			rxbuf->rxdata.pg_off = 0;
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.pg_reuses++;
			u64_stats_update_end(&self->stats.rx.syncp);
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_get_rxpage(&rxbuf->rxdata, order,
				    aq_nic_get_dev(self->aq_nic));
		if (ret) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.alloc_fails++;
			u64_stats_update_end(&self->stats.rx.syncp);
		}
		return ret;
	}

	return 0;
}

static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;

	if (aq_nic_cfg->rxpageorder > self->page_order)
		self->page_order = aq_nic_cfg->rxpageorder;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, unsigned int size, unsigned int dx_size)
{
	struct device *dev = aq_nic_get_dev(aq_nic);
	size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;

	memset(self, 0, sizeof(*self));

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = size;
	self->dx_size = dx_size;

	self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
					   GFP_KERNEL);
	if (!self->dx_ring) {
		aq_ring_free(self);
		return NULL;
	}

	return self;
}

int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	self->ring_type = ring_type;

	if (self->ring_type == ATL_RING_RX)
		u64_stats_init(&self->stats.rx.syncp);
	else
		u64_stats_init(&self->stats.tx.syncp);

	return 0;
}

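/* True if descriptor index i lies strictly between indices h and t on the
 * circular ring. When h < t the span does not wrap and a plain range test is
 * enough; otherwise the span wraps past the end of the ring, so i qualifies
 * if it sits either after h or before t. For example, on a 32-descriptor ring
 * with h == 30 and t == 2, the indices 31, 0 and 1 are all in range.
 */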
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev,
				     AQ_NIC_RING2QMAP(ring->aq_nic,
						      ring->idx))) {
		netif_wake_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
		u64_stats_update_begin(&ring->stats.tx.syncp);
		ring->stats.tx.queue_restarts++;
		u64_stats_update_end(&ring->stats.tx.syncp);
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev,
				      AQ_NIC_RING2QMAP(ring->aq_nic,
						       ring->idx)))
		netif_stop_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
}

bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
							  buff->eop_index,
							  self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop)) {
			u64_stats_update_begin(&self->stats.tx.syncp);
			++self->stats.tx.packets;
			self->stats.tx.bytes += buff->skb->len;
			u64_stats_update_end(&self->stats.tx.syncp);

			dev_kfree_skb_any(buff->skb);
		}
		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

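/* Propagate the hardware checksum offload results into the skb. An offload
 * error is counted as an rx error and the packet is left as CHECKSUM_NONE so
 * the stack verifies it in software; otherwise every checksum the hardware
 * validated (IP header first, then TCP/UDP) raises the skb's
 * checksum-unnecessary level via __skb_incr_checksum_unnecessary().
 */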
static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		u64_stats_update_begin(&self->stats.rx.syncp);
		++self->stats.rx.errors;
		u64_stats_update_end(&self->stats.rx.syncp);
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
	if (buff->is_ip_cso) {
		__skb_incr_checksum_unnecessary(skb);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	bool is_rsc_completed = true;
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		u16 hdr_len;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed))
					break;

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;

			} while (!buff_->is_eop);

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
			if (buff->is_error ||
			    (buff->is_lro && buff->is_cso_err)) {
				buff_ = buff;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				u64_stats_update_begin(&self->stats.rx.syncp);
				++self->stats.rx.errors;
				u64_stats_update_end(&self->stats.rx.syncp);
				continue;
			}
		}

		if (buff->is_error) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			++self->stats.rx.errors;
			u64_stats_update_end(&self->stats.rx.syncp);
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		/* for single fragment packets use build_skb() */
		if (buff->is_eop &&
		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
					AQ_CFG_RX_FRAME_MAX);
			if (unlikely(!skb)) {
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.skb_alloc_fails++;
				u64_stats_update_end(&self->stats.rx.syncp);
				err = -ENOMEM;
				goto err_exit;
			}
			if (is_ptp_ring)
				buff->len -=
					aq_ptp_extract_ts(self->aq_nic, skb,
							  aq_buf_vaddr(&buff->rxdata),
							  buff->len);
			skb_put(skb, buff->len);
			page_ref_inc(buff->rxdata.page);
		} else {
			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
			if (unlikely(!skb)) {
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.skb_alloc_fails++;
				u64_stats_update_end(&self->stats.rx.syncp);
				err = -ENOMEM;
				goto err_exit;
			}
			if (is_ptp_ring)
				buff->len -=
					aq_ptp_extract_ts(self->aq_nic, skb,
							  aq_buf_vaddr(&buff->rxdata),
							  buff->len);

			hdr_len = buff->len;
			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
				hdr_len = eth_get_headlen(skb->dev,
							  aq_buf_vaddr(&buff->rxdata),
							  AQ_CFG_RX_HDR_SIZE);

			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
			       ALIGN(hdr_len, sizeof(long)));

			if (buff->len - hdr_len > 0) {
				skb_add_rx_frag(skb, 0, buff->rxdata.page,
						buff->rxdata.pg_off + hdr_len,
						buff->len - hdr_len,
						AQ_CFG_RX_FRAME_MAX);
				page_ref_inc(buff->rxdata.page);
			}

			if (!buff->is_eop) {
				buff_ = buff;
				i = 1U;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					dma_sync_single_range_for_cpu(
							aq_nic_get_dev(self->aq_nic),
							buff_->rxdata.daddr,
							buff_->rxdata.pg_off,
							buff_->len,
							DMA_FROM_DEVICE);
					skb_add_rx_frag(skb, i++,
							buff_->rxdata.page,
							buff_->rxdata.pg_off,
							buff_->len,
							AQ_CFG_RX_FRAME_MAX);
					page_ref_inc(buff_->rxdata.page);
					buff_->is_cleaned = 1;

					buff->is_ip_cso &= buff_->is_ip_cso;
					buff->is_udp_cso &= buff_->is_udp_cso;
					buff->is_tcp_cso &= buff_->is_tcp_cso;
					buff->is_cso_err |= buff_->is_cso_err;

				} while (!buff_->is_eop);
			}
		}

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to 0 queue */
		skb_record_rx_queue(skb,
				    is_ptp_ring ? 0
						: AQ_NIC_RING2QMAP(self->aq_nic,
								   self->idx));

		u64_stats_update_begin(&self->stats.rx.syncp);
		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;
		u64_stats_update_end(&self->stats.rx.syncp);

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

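/* Drain the hardware-timestamp ring: the hw-specific extract_hwts() op
 * decodes each pending descriptor into a nanosecond value, which is handed to
 * aq_ptp_tx_hwtstamp() for Tx timestamp completion.
 */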
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	while (self->sw_head != self->hw_head) {
		u64 ns;

		aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
						self->dx_ring +
						(self->sw_head * self->dx_size),
						self->dx_size, &ns);
		aq_ptp_tx_hwtstamp(aq_nic, ns);

		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}
#endif
}

int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int page_order = self->page_order;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
	     self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		err = aq_get_rxpages(self, buff, page_order);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		return;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}
}

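/* Release everything aq_ring_alloc() or aq_ring_hwts_rx_alloc() set up.
 * kfree(NULL) is a no-op and dx_ring is checked explicitly, so a partially
 * constructed ring can be freed safely from the allocation error paths.
 */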
void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);
}

unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
	unsigned int count;
	unsigned int start;

	if (self->ring_type == ATL_RING_RX) {
		/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
		do {
			count = 0;
			start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp);
			data[count] = self->stats.rx.packets;
			data[++count] = self->stats.rx.jumbo_packets;
			data[++count] = self->stats.rx.lro_packets;
			data[++count] = self->stats.rx.errors;
			data[++count] = self->stats.rx.alloc_fails;
			data[++count] = self->stats.rx.skb_alloc_fails;
			data[++count] = self->stats.rx.polls;
		} while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
	} else {
		/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
		do {
			count = 0;
			start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp);
			data[count] = self->stats.tx.packets;
			data[++count] = self->stats.tx.queue_restarts;
		} while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start));
	}

	return ++count;
}