// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>
#include <net/xdp_sock_drv.h>

static inline void gve_tx_put_doorbell(struct gve_priv *priv,
				       struct gve_queue_resources *q_resources,
				       u32 val)
{
	iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
}

void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid)
{
	u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
	struct gve_tx_ring *tx = &priv->tx[tx_qid];

	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
}

/* gvnic can only transmit from a Registered Segment.
 * We copy skb payloads into the registered segment before writing Tx
 * descriptors and ringing the Tx doorbell.
 *
 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
 * free allocations in the order they were allocated.
 */

static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
			  PAGE_KERNEL);
	if (unlikely(!fifo->base)) {
		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
			  fifo->qpl->id);
		return -ENOMEM;
	}

	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
	atomic_set(&fifo->available, fifo->size);
	fifo->head = 0;
	return 0;
}

static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	WARN(atomic_read(&fifo->available) != fifo->size,
	     "Releasing non-empty fifo");

	vunmap(fifo->base);
}
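
/* Returns 0 if an allocation of @bytes fits before the end of the FIFO,
 * otherwise returns the number of padding bytes needed to reach the end of
 * the FIFO so that the allocation can start back at its beginning.
 */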
static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
					  size_t bytes)
{
	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
}

static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
{
	return (atomic_read(&fifo->available) <= bytes) ? false : true;
}

/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
 * @fifo: FIFO to allocate from
 * @bytes: Allocation size
 * @iov: Scatter-gather elements to fill with allocation fragment base/len
 *
 * Returns number of valid elements in iov[] or negative on error.
 *
 * Allocations from a given FIFO must be externally synchronized but concurrent
 * allocation and frees are allowed.
 */
static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
			     struct gve_tx_iovec iov[2])
{
	size_t overflow, padding;
	u32 aligned_head;
	int nfrags = 0;

	if (!bytes)
		return 0;

	/* This check happens before we know how much padding is needed to
	 * align to a cacheline boundary for the payload, but that is fine,
	 * because the FIFO head always starts aligned, and the FIFO's
	 * boundaries are aligned, so if there is space for the data, there is
	 * space for the padding to the next alignment.
	 */
	WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
	     "Reached %s when there's not enough space in the fifo", __func__);

	nfrags++;

	iov[0].iov_offset = fifo->head;
	iov[0].iov_len = bytes;
	fifo->head += bytes;

	if (fifo->head > fifo->size) {
		/* If the allocation did not fit in the tail fragment of the
		 * FIFO, also use the head fragment.
		 */
		nfrags++;
		overflow = fifo->head - fifo->size;
		iov[0].iov_len -= overflow;
		iov[1].iov_offset = 0;	/* Start of fifo */
		iov[1].iov_len = overflow;

		fifo->head = overflow;
	}

	/* Re-align to a cacheline boundary */
	aligned_head = L1_CACHE_ALIGN(fifo->head);
	padding = aligned_head - fifo->head;
	iov[nfrags - 1].iov_padding = padding;
	atomic_sub(bytes + padding, &fifo->available);
	fifo->head = aligned_head;

	if (fifo->head == fifo->size)
		fifo->head = 0;

	return nfrags;
}

/* gve_tx_free_fifo - Return space to Tx FIFO
 * @fifo: FIFO to return fragments to
 * @bytes: Bytes to free
 */
static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
{
	atomic_add(bytes, &fifo->available);
}

static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info)
{
	size_t space_freed = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
		space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
		info->iov[i].iov_len = 0;
		info->iov[i].iov_padding = 0;
	}
	return space_freed;
}
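
/* Reclaim up to @to_do completed XDP/XSK Tx descriptors: return their FIFO
 * space, release any xdp_frame, report XSK completions and update the ring's
 * byte/packet counters. Returns the number of packets cleaned.
 */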
static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			      u32 to_do)
{
	struct gve_tx_buffer_state *info;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	u32 xsk_complete = 0;
	u32 idx;
	int i;

	for (i = 0; i < to_do; i++) {
		idx = tx->done & tx->mask;
		info = &tx->info[idx];
		tx->done++;

		if (unlikely(!info->xdp.size))
			continue;

		bytes += info->xdp.size;
		pkts++;
		xsk_complete += info->xdp.is_xsk;

		info->xdp.size = 0;
		if (info->xdp_frame) {
			xdp_return_frame(info->xdp_frame);
			info->xdp_frame = NULL;
		}
		space_freed += gve_tx_clear_buffer_state(info);
	}

	gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	if (xsk_complete > 0 && tx->xsk_pool)
		xsk_tx_completed(tx->xsk_pool, xsk_complete);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	return pkts;
}

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake);

static void gve_tx_free_ring(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	size_t bytes;
	u32 slots;

	gve_tx_remove_from_block(priv, idx);
	slots = tx->mask + 1;
	if (tx->q_num < priv->tx_cfg.num_queues) {
		gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
		netdev_tx_reset_queue(tx->netdev_txq);
	} else {
		gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
	}

	dma_free_coherent(hdev, sizeof(*tx->q_resources),
			  tx->q_resources, tx->q_resources_bus);
	tx->q_resources = NULL;

	if (!tx->raw_addressing) {
		gve_tx_fifo_release(priv, &tx->tx_fifo);
		gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
		tx->tx_fifo.qpl = NULL;
	}

	bytes = sizeof(*tx->desc) * slots;
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;

	vfree(tx->info);
	tx->info = NULL;

	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}

static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	u32 slots = priv->tx_desc_cnt;
	size_t bytes;

	/* Make sure everything is zeroed to start */
	memset(tx, 0, sizeof(*tx));
	spin_lock_init(&tx->clean_lock);
	spin_lock_init(&tx->xdp_lock);
	tx->q_num = idx;

	tx->mask = slots - 1;

	/* alloc metadata */
	tx->info = vcalloc(slots, sizeof(*tx->info));
	if (!tx->info)
		return -ENOMEM;

	/* alloc tx queue */
	bytes = sizeof(*tx->desc) * slots;
	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
	if (!tx->desc)
		goto abort_with_info;

	tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
	tx->dev = &priv->pdev->dev;
	if (!tx->raw_addressing) {
		tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx);
		if (!tx->tx_fifo.qpl)
			goto abort_with_desc;
		/* map Tx FIFO */
		if (gve_tx_fifo_init(priv, &tx->tx_fifo))
			goto abort_with_qpl;
	}

	tx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*tx->q_resources),
				   &tx->q_resources_bus,
				   GFP_KERNEL);
	if (!tx->q_resources)
		goto abort_with_fifo;

	netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
		  (unsigned long)tx->bus);
	if (idx < priv->tx_cfg.num_queues)
		tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
	gve_tx_add_to_block(priv, idx);

	return 0;

abort_with_fifo:
	if (!tx->raw_addressing)
		gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
	if (!tx->raw_addressing)
		gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
abort_with_desc:
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;
abort_with_info:
	vfree(tx->info);
	tx->info = NULL;
	return -ENOMEM;
}

int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings)
{
	int err = 0;
	int i;

	for (i = start_id; i < start_id + num_rings; i++) {
		err = gve_tx_alloc_ring(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc tx ring=%d: err=%d\n",
				  i, err);
			break;
		}
	}
	/* Free any already-allocated rings if there was an error */
	if (err) {
		int j;

		for (j = start_id; j < i; j++)
			gve_tx_free_ring(priv, j);
	}
	return err;
}

void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings)
{
	int i;

	for (i = start_id; i < start_id + num_rings; i++)
		gve_tx_free_ring(priv, i);
}

/* gve_tx_avail - Calculates the number of slots available in the ring
 * @tx: tx ring to check
 *
 * Returns the number of slots available
 *
 * The capacity of the queue is mask + 1. We don't need to reserve an entry.
 **/
static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
{
	return tx->mask + 1 - (tx->req - tx->done);
}
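
/* Worst-case number of FIFO bytes needed to copy @skb: padding to the end of
 * the FIFO if the header would otherwise be split, cacheline-alignment
 * padding after the header, and the full packet length.
 */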
static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
					      struct sk_buff *skb)
{
	int pad_bytes, align_hdr_pad;
	int bytes;
	int hlen;

	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
				 min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);

	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
						   hlen);
	/* We need to take into account the header alignment padding. */
	align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
	bytes = align_hdr_pad + pad_bytes + skb->len;

	return bytes;
}

/* The most descriptors we could need is MAX_SKB_FRAGS + 4 :
 * 1 for each skb frag
 * 1 for the skb linear portion
 * 1 for when tcp hdr needs to be in separate descriptor
 * 1 if the payload wraps to the beginning of the FIFO
 * 1 for metadata descriptor
 */
#define MAX_TX_DESC_NEEDED	(MAX_SKB_FRAGS + 4)
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
	if (info->skb) {
		dma_unmap_single(dev, dma_unmap_addr(info, dma),
				 dma_unmap_len(info, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(info, len, 0);
	} else {
		dma_unmap_page(dev, dma_unmap_addr(info, dma),
			       dma_unmap_len(info, len),
			       DMA_TO_DEVICE);
		dma_unmap_len_set(info, len, 0);
	}
}

/* Check if sufficient resources (descriptor ring space, FIFO space) are
 * available to transmit the given number of bytes.
 */
static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{
	bool can_alloc = true;

	if (!tx->raw_addressing)
		can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);

	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}

static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);

/* Stops the queue if the skb cannot be transmitted. */
static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
			     struct sk_buff *skb)
{
	int bytes_required = 0;
	u32 nic_done;
	u32 to_do;
	int ret;

	if (!tx->raw_addressing)
		bytes_required = gve_skb_fifo_bytes_required(tx, skb);

	if (likely(gve_can_tx(tx, bytes_required)))
		return 0;

	ret = -EBUSY;
	spin_lock(&tx->clean_lock);
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = nic_done - tx->done;

	/* Only try to clean if there is hope for TX */
	if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
		if (to_do > 0) {
			to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
			gve_clean_tx_done(priv, tx, to_do, false);
		}
		if (likely(gve_can_tx(tx, bytes_required)))
			ret = 0;
	}
	if (ret) {
		/* No space, so stop the queue */
		tx->stop_queue++;
		netif_tx_stop_queue(tx->netdev_txq);
	}
	spin_unlock(&tx->clean_lock);

	return ret;
}
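
/* Write the first (packet) descriptor: checksum offload/TSO flags, the total
 * number of descriptors used by the packet, and the address and length of the
 * first (header) segment.
 */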
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
				 u16 csum_offset, u8 ip_summed, bool is_gso,
				 int l4_hdr_offset, u32 desc_cnt,
				 u16 hlen, u64 addr, u16 pkt_len)
{
	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
	if (is_gso) {
		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else if (likely(ip_summed == CHECKSUM_PARTIAL)) {
		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else {
		pkt_desc->pkt.type_flags = GVE_TXD_STD;
		pkt_desc->pkt.l4_csum_offset = 0;
		pkt_desc->pkt.l4_hdr_offset = 0;
	}
	pkt_desc->pkt.desc_cnt = desc_cnt;
	pkt_desc->pkt.len = cpu_to_be16(pkt_len);
	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}

static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
				 struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));

	mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
	mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
				   GVE_MTD_PATH_HASH_L4;
	mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
	mtd_desc->mtd.reserved0 = 0;
	mtd_desc->mtd.reserved1 = 0;
}

static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
				 u16 l3_offset, u16 gso_size,
				 bool is_gso_v6, bool is_gso,
				 u16 len, u64 addr)
{
	seg_desc->seg.type_flags = GVE_TXD_SEG;
	if (is_gso) {
		if (is_gso_v6)
			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
		seg_desc->seg.l3_offset = l3_offset >> 1;
		seg_desc->seg.mss = cpu_to_be16(gso_size);
	}
	seg_desc->seg.seg_len = cpu_to_be16(len);
	seg_desc->seg.seg_addr = cpu_to_be64(addr);
}

static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
				    u64 iov_offset, u64 iov_len)
{
	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
	u64 first_page = iov_offset / PAGE_SIZE;
	u64 page;

	for (page = first_page; page <= last_page; page++)
		dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
}
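
/* QPL (copy) path: copy the skb header and payload into the registered Tx
 * FIFO and write the packet descriptor plus any metadata and segment
 * descriptors. Returns the number of descriptors used; the caller advances
 * tx->req by that amount.
 */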
static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
{
	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	int mtd_desc_nr = !!skb->l4_hash;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	int payload_iov = 2;
	int copy_offset;
	u32 next_idx;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want the tcp header alone in the first
	 * segment; otherwise we want the minimum required by the gVNIC spec.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
			min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);

	info->skb = skb;
	/* We don't want to split the header, so if necessary, pad to the end
	 * of the fifo and then put the header at the beginning of the fifo.
	 */
	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
	hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
				       &info->iov[0]);
	WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
					   &info->iov[payload_iov]);

	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
			     is_gso, l4_hdr_offset,
			     1 + mtd_desc_nr + payload_nfrags, hlen,
			     info->iov[hdr_nfrags - 1].iov_offset, skb->len);

	skb_copy_bits(skb, 0,
		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
		      hlen);
	gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
				info->iov[hdr_nfrags - 1].iov_offset,
				info->iov[hdr_nfrags - 1].iov_len);
	copy_offset = hlen;

	if (mtd_desc_nr) {
		next_idx = (tx->req + 1) & tx->mask;
		gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
	}

	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
		next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) &
			   tx->mask;
		seg_desc = &tx->desc[next_idx];

		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso,
				     info->iov[i].iov_len,
				     info->iov[i].iov_offset);

		skb_copy_bits(skb, copy_offset,
			      tx->tx_fifo.base + info->iov[i].iov_offset,
			      info->iov[i].iov_len);
		gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
					info->iov[i].iov_offset,
					info->iov[i].iov_len);
		copy_offset += info->iov[i].iov_len;
	}

	return 1 + mtd_desc_nr + payload_nfrags;
}
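
/* Raw addressing (RDA) path: DMA-map the skb linear portion and each frag and
 * point descriptors directly at them instead of copying into the FIFO.
 * Returns the number of descriptors used, or 0 if the skb was dropped because
 * of a DMA mapping error.
 */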
static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
				  struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	int hlen, num_descriptors, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	int mtd_desc_nr = !!skb->l4_hash;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	u64 addr;
	u32 len;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want only up to the tcp header in the
	 * first segment to efficiently replicate on each segment; otherwise
	 * we want the linear portion of the skb (which will contain the
	 * checksum because skb->csum_start and skb->csum_offset are given
	 * relative to skb->head) in the first segment.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
	len = skb_headlen(skb);

	info->skb = skb;

	addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx->dev, addr))) {
		tx->dma_mapping_error++;
		goto drop;
	}
	dma_unmap_len_set(info, len, len);
	dma_unmap_addr_set(info, dma, addr);

	num_descriptors = 1 + shinfo->nr_frags;
	if (hlen < len)
		num_descriptors++;
	if (mtd_desc_nr)
		num_descriptors++;

	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
			     is_gso, l4_hdr_offset,
			     num_descriptors, hlen, addr, skb->len);

	if (mtd_desc_nr) {
		idx = (idx + 1) & tx->mask;
		mtd_desc = &tx->desc[idx];
		gve_tx_fill_mtd_desc(mtd_desc, skb);
	}

	if (hlen < len) {
		/* For gso the rest of the linear portion of the skb needs to
		 * be in its own descriptor.
		 */
		len -= hlen;
		addr += hlen;
		idx = (idx + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso, len, addr);
	}

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];

		idx = (idx + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		len = skb_frag_size(frag);
		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx->dev, addr))) {
			tx->dma_mapping_error++;
			goto unmap_drop;
		}
		tx->info[idx].skb = NULL;
		dma_unmap_len_set(&tx->info[idx], len, len);
		dma_unmap_addr_set(&tx->info[idx], dma, addr);

		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso, len, addr);
	}

	return num_descriptors;

unmap_drop:
	i += num_descriptors - shinfo->nr_frags;
	while (i--) {
		/* Skip metadata descriptor, if set */
		if (i == 1 && mtd_desc_nr == 1)
			continue;
		idx--;
		gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
	}
drop:
	tx->dropped_pkt++;
	return 0;
}

netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int nsegs;

	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
	     "skb queue index out of range");
	tx = &priv->tx[skb_get_queue_mapping(skb)];
	if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
		/* We need to ring the txq doorbell -- we have stopped the Tx
		 * queue for want of resources, but prior calls to gve_tx()
		 * may have added descriptors without ringing the doorbell.
		 */

		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		return NETDEV_TX_BUSY;
	}
	if (tx->raw_addressing)
		nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
	else
		nsegs = gve_tx_add_skb_copy(priv, tx, skb);

	/* If the packet is getting sent, we need to update the skb */
	if (nsegs) {
		netdev_tx_sent_queue(tx->netdev_txq, skb->len);
		skb_tx_timestamp(skb);
		tx->req += nsegs;
	} else {
		dev_kfree_skb_any(skb);
	}

	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;

	/* Give packets to NIC. Even if this packet failed to send, the
	 * doorbell might need to be rung because of xmit_more.
	 */
	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
	return NETDEV_TX_OK;
}
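
/* Copy an XDP or XSK frame into the Tx FIFO and write its descriptors.
 * Returns the number of descriptors written; the caller advances tx->req
 * and rings the doorbell.
 */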
static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
			   void *data, int len, void *frame_p, bool is_xsk)
{
	int pad, nfrags, ndescs, iovi, offset;
	struct gve_tx_buffer_state *info;
	u32 reqi = tx->req;

	pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len);
	if (pad >= GVE_GQ_TX_MIN_PKT_DESC_BYTES)
		pad = 0;
	info = &tx->info[reqi & tx->mask];
	info->xdp_frame = frame_p;
	info->xdp.size = len;
	info->xdp.is_xsk = is_xsk;

	nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,
				   &info->iov[0]);
	iovi = pad > 0;
	ndescs = nfrags - iovi;
	offset = 0;

	while (iovi < nfrags) {
		if (!offset)
			gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0,
					     CHECKSUM_NONE, false, 0, ndescs,
					     info->iov[iovi].iov_len,
					     info->iov[iovi].iov_offset, len);
		else
			gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask],
					     0, 0, false, false,
					     info->iov[iovi].iov_len,
					     info->iov[iovi].iov_offset);

		memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset,
		       data + offset, info->iov[iovi].iov_len);
		gve_dma_sync_for_device(&priv->pdev->dev,
					tx->tx_fifo.qpl->page_buses,
					info->iov[iovi].iov_offset,
					info->iov[iovi].iov_len);
		offset += info->iov[iovi].iov_len;
		iovi++;
		reqi++;
	}

	return ndescs;
}

int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int i, err = 0, qid;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	qid = gve_xdp_tx_queue_id(priv,
				  smp_processor_id() % priv->num_xdp_queues);

	tx = &priv->tx[qid];

	spin_lock(&tx->xdp_lock);
	for (i = 0; i < n; i++) {
		err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
				       frames[i]->len, frames[i]);
		if (err)
			break;
	}

	if (flags & XDP_XMIT_FLUSH)
		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);

	spin_unlock(&tx->xdp_lock);

	u64_stats_update_begin(&tx->statss);
	tx->xdp_xmit += n;
	tx->xdp_xmit_errors += n - i;
	u64_stats_update_end(&tx->statss);

	return i ? i : err;
}

int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p)
{
	int nsegs;

	if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1))
		return -EBUSY;

	nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false);
	tx->req += nsegs;

	return 0;
}

#define GVE_TX_START_THRESH	PAGE_SIZE
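
/* gve_clean_tx_done - Reclaim completed descriptors for a netdev Tx queue
 * @priv: gve private structure
 * @tx: Tx ring to clean
 * @to_do: number of completed slots to process
 * @try_to_wake: wake the queue if it was stopped and resources are now free
 *
 * Returns the number of packets completed.
 */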
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake)
{
	struct gve_tx_buffer_state *info;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	struct sk_buff *skb;
	u32 idx;
	int j;

	for (j = 0; j < to_do; j++) {
		idx = tx->done & tx->mask;
		netif_info(priv, tx_done, priv->dev,
			   "[%d] %s: idx=%d (req=%u done=%u)\n",
			   tx->q_num, __func__, idx, tx->req, tx->done);
		info = &tx->info[idx];
		skb = info->skb;

		/* Unmap the buffer */
		if (tx->raw_addressing)
			gve_tx_unmap_buf(tx->dev, info);
		tx->done++;
		/* Mark as free */
		if (skb) {
			info->skb = NULL;
			bytes += skb->len;
			pkts++;
			dev_consume_skb_any(skb);
			if (tx->raw_addressing)
				continue;
			space_freed += gve_tx_clear_buffer_state(info);
		}
	}

	if (!tx->raw_addressing)
		gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);

	/* start the queue if we've stopped it */
#ifndef CONFIG_BQL
	/* Make sure that the doorbells are synced */
	smp_mb();
#endif
	if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
	    likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
		tx->wake_queue++;
		netif_tx_wake_queue(tx->netdev_txq);
	}

	return pkts;
}

u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx)
{
	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
	__be32 counter = READ_ONCE(priv->counter_array[counter_index]);

	return be32_to_cpu(counter);
}

static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
		      int budget)
{
	struct xdp_desc desc;
	int sent = 0, nsegs;
	void *data;

	spin_lock(&tx->xdp_lock);
	while (sent < budget) {
		if (!gve_can_tx(tx, GVE_TX_START_THRESH))
			goto out;

		if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
			tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
			goto out;
		}

		data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
		nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
		tx->req += nsegs;
		sent++;
	}
out:
	if (sent > 0) {
		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		xsk_tx_release(tx->xsk_pool);
	}
	spin_unlock(&tx->xdp_lock);
	return sent;
}
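
/* NAPI handler for an XDP Tx queue: reclaim completed descriptors and, if an
 * XSK pool is attached, transmit pending AF_XDP descriptors. Returns true if
 * more work remains.
 */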
bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	u32 nic_done;
	bool repoll;
	u32 to_do;

	/* Find out how much work there is to be done */
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = min_t(u32, (nic_done - tx->done), budget);
	gve_clean_xdp_done(priv, tx, to_do);
	repoll = nic_done != tx->done;

	if (tx->xsk_pool) {
		int sent = gve_xsk_tx(priv, tx, budget);

		u64_stats_update_begin(&tx->statss);
		tx->xdp_xsk_sent += sent;
		u64_stats_update_end(&tx->statss);
		repoll |= (sent == budget);
		if (xsk_uses_need_wakeup(tx->xsk_pool))
			xsk_set_tx_need_wakeup(tx->xsk_pool);
	}

	/* If we still have work we want to repoll */
	return repoll;
}

bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	u32 nic_done;
	u32 to_do;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	/* The xmit path may also try to clean completed packets in order to
	 * transmit more; to avoid a cleaning conflict, use spin_lock(), which
	 * yields better xmit/clean concurrency than netif's lock.
	 */
	spin_lock(&tx->clean_lock);
	/* Find out how much work there is to be done */
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = min_t(u32, (nic_done - tx->done), budget);
	gve_clean_tx_done(priv, tx, to_do, true);
	spin_unlock(&tx->clean_lock);
	/* If we still have work we want to repoll */
	return nic_done != tx->done;
}

bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
{
	u32 nic_done = gve_tx_load_event_counter(priv, tx);

	return nic_done != tx->done;
}