// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

static inline void gve_tx_put_doorbell(struct gve_priv *priv,
				       struct gve_queue_resources *q_resources,
				       u32 val)
{
	iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
}

/* gvnic can only transmit from a Registered Segment.
 * We copy skb payloads into the registered segment before writing Tx
 * descriptors and ringing the Tx doorbell.
 *
 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
 * free allocations in the order they were allocated.
 */

static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
			  PAGE_KERNEL);
	if (unlikely(!fifo->base)) {
		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
			  fifo->qpl->id);
		return -ENOMEM;
	}

	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
	atomic_set(&fifo->available, fifo->size);
	fifo->head = 0;
	return 0;
}

static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	WARN(atomic_read(&fifo->available) != fifo->size,
	     "Releasing non-empty fifo");

	vunmap(fifo->base);
}

static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
					  size_t bytes)
{
	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
}

static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
{
	return (atomic_read(&fifo->available) <= bytes) ? false : true;
}

/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
 * @fifo: FIFO to allocate from
 * @bytes: Allocation size
 * @iov: Scatter-gather elements to fill with allocation fragment base/len
 *
 * Returns number of valid elements in iov[] or negative on error.
 *
 * Allocations from a given FIFO must be externally synchronized but concurrent
 * allocation and frees are allowed.
 */
static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
			     struct gve_tx_iovec iov[2])
{
	size_t overflow, padding;
	u32 aligned_head;
	int nfrags = 0;

	if (!bytes)
		return 0;

	/* This check happens before we know how much padding is needed to
	 * align to a cacheline boundary for the payload, but that is fine,
	 * because the FIFO head always starts aligned, and the FIFO's
	 * boundaries are aligned, so if there is space for the data, there is
	 * space for the padding to the next alignment.
	 */
	WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
	     "Reached %s when there's not enough space in the fifo", __func__);

	nfrags++;

	iov[0].iov_offset = fifo->head;
	iov[0].iov_len = bytes;
	fifo->head += bytes;

	if (fifo->head > fifo->size) {
		/* If the allocation did not fit in the tail fragment of the
		 * FIFO, also use the head fragment.
		 */
		nfrags++;
		overflow = fifo->head - fifo->size;
		iov[0].iov_len -= overflow;
		iov[1].iov_offset = 0;	/* Start of fifo */
		iov[1].iov_len = overflow;

		fifo->head = overflow;
	}

	/* Re-align to a cacheline boundary */
	aligned_head = L1_CACHE_ALIGN(fifo->head);
	padding = aligned_head - fifo->head;
	iov[nfrags - 1].iov_padding = padding;
	atomic_sub(bytes + padding, &fifo->available);
	fifo->head = aligned_head;

	if (fifo->head == fifo->size)
		fifo->head = 0;

	return nfrags;
}
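/* Example: with a two-page (8192 byte) FIFO, head = 8000 and a 300 byte
 * allocation, and assuming a 64 byte L1 cache line, the allocation wraps:
 *   iov[0] = { .iov_offset = 8000, .iov_len = 192 }  (tail of the FIFO)
 *   iov[1] = { .iov_offset = 0,    .iov_len = 108 }  (wrapped to the start)
 * head becomes 108 and is re-aligned to 128, so iov[1].iov_padding = 20,
 * 320 bytes (300 + 20) are taken from fifo->available, and nfrags = 2 is
 * returned.
 */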

/* gve_tx_free_fifo - Return space to Tx FIFO
 * @fifo: FIFO to return fragments to
 * @bytes: Bytes to free
 */
static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
{
	atomic_add(bytes, &fifo->available);
}

static void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	block->tx = NULL;
}

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake);

static void gve_tx_free_ring(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	size_t bytes;
	u32 slots;

	gve_tx_remove_from_block(priv, idx);
	slots = tx->mask + 1;
	gve_clean_tx_done(priv, tx, tx->req, false);
	netdev_tx_reset_queue(tx->netdev_txq);

	dma_free_coherent(hdev, sizeof(*tx->q_resources),
			  tx->q_resources, tx->q_resources_bus);
	tx->q_resources = NULL;

	if (!tx->raw_addressing) {
		gve_tx_fifo_release(priv, &tx->tx_fifo);
		gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
		tx->tx_fifo.qpl = NULL;
	}

	bytes = sizeof(*tx->desc) * slots;
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;

	vfree(tx->info);
	tx->info = NULL;

	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}

static void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_tx_ring *tx = &priv->tx[queue_idx];

	block->tx = tx;
	tx->ntfy_id = ntfy_idx;
}

static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	u32 slots = priv->tx_desc_cnt;
	size_t bytes;

	/* Make sure everything is zeroed to start */
	memset(tx, 0, sizeof(*tx));
	tx->q_num = idx;

	tx->mask = slots - 1;

	/* alloc metadata */
	tx->info = vzalloc(sizeof(*tx->info) * slots);
	if (!tx->info)
		return -ENOMEM;

	/* alloc tx queue */
	bytes = sizeof(*tx->desc) * slots;
	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
	if (!tx->desc)
		goto abort_with_info;

	tx->raw_addressing = priv->raw_addressing;
	tx->dev = &priv->pdev->dev;
	if (!tx->raw_addressing) {
		tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
		if (!tx->tx_fifo.qpl)
			goto abort_with_desc;
		/* map Tx FIFO */
		if (gve_tx_fifo_init(priv, &tx->tx_fifo))
			goto abort_with_qpl;
	}

	tx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*tx->q_resources),
				   &tx->q_resources_bus,
				   GFP_KERNEL);
	if (!tx->q_resources)
		goto abort_with_fifo;

	netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
		  (unsigned long)tx->bus);
	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
	gve_tx_add_to_block(priv, idx);

	return 0;

abort_with_fifo:
	if (!tx->raw_addressing)
		gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
	if (!tx->raw_addressing)
		gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
abort_with_desc:
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;
abort_with_info:
	vfree(tx->info);
	tx->info = NULL;
	return -ENOMEM;
}

int gve_tx_alloc_rings(struct gve_priv *priv)
{
	int err = 0;
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		err = gve_tx_alloc_ring(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc tx ring=%d: err=%d\n",
				  i, err);
			break;
		}
	}
	/* Clean up the rings that were allocated if there was an error */
	if (err) {
		int j;

		for (j = 0; j < i; j++)
			gve_tx_free_ring(priv, j);
	}
	return err;
}

void gve_tx_free_rings(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++)
		gve_tx_free_ring(priv, i);
}

/* gve_tx_avail - Calculates the number of slots available in the ring
 * @tx: tx ring to check
 *
 * Returns the number of slots available
 *
 * The capacity of the queue is mask + 1. We don't need to reserve an entry.
 **/
static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
{
	return tx->mask + 1 - (tx->req - tx->done);
}

static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
					      struct sk_buff *skb)
{
	int pad_bytes, align_hdr_pad;
	int bytes;
	int hlen;

	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
				 tcp_hdrlen(skb) : skb_headlen(skb);

	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
	/* We need to take into account the header alignment padding. */
	align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
	bytes = align_hdr_pad + pad_bytes + skb->len;

	return bytes;
}

/* The most descriptors we could need is MAX_SKB_FRAGS + 3: 1 for each skb frag,
 * +1 for the skb linear portion, +1 for when the tcp hdr needs to be in its own
 * descriptor, and +1 if the payload wraps to the beginning of the FIFO.
 */
#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 3)
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
	/* The slot that owns the skb holds the dma_map_single() mapping of the
	 * linear area; frag slots (skb == NULL) hold page mappings.
	 */
	if (info->skb) {
		dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma),
				 dma_unmap_len(&info->buf, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(&info->buf, len, 0);
	} else {
		dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma),
			       dma_unmap_len(&info->buf, len),
			       DMA_TO_DEVICE);
		dma_unmap_len_set(&info->buf, len, 0);
	}
}

/* Check if sufficient resources (descriptor ring space, FIFO space) are
 * available to transmit the given number of bytes.
 */
static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{
	bool can_alloc = true;

	if (!tx->raw_addressing)
		can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);

	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}

/* Stops the queue if the skb cannot be transmitted. */
static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
{
	int bytes_required = 0;

	if (!tx->raw_addressing)
		bytes_required = gve_skb_fifo_bytes_required(tx, skb);

	if (likely(gve_can_tx(tx, bytes_required)))
		return 0;

	/* No space, so stop the queue */
	tx->stop_queue++;
	netif_tx_stop_queue(tx->netdev_txq);
	smp_mb();	/* sync with restarting queue in gve_clean_tx_done() */

	/* Now check for resources again, in case gve_clean_tx_done() freed
	 * resources after we checked and we stopped the queue after
	 * gve_clean_tx_done() checked.
	 *
	 * gve_maybe_stop_tx()			gve_clean_tx_done()
	 *   nsegs/can_alloc test failed
	 *					  gve_tx_free_fifo()
	 *					  if (tx queue stopped)
	 *					    netif_tx_wake_queue()
	 *   netif_tx_stop_queue()
	 *   Need to check again for space here!
	 */
	if (likely(!gve_can_tx(tx, bytes_required)))
		return -EBUSY;

	netif_tx_start_queue(tx->netdev_txq);
	tx->wake_queue++;
	return 0;
}

static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
				 struct sk_buff *skb, bool is_gso,
				 int l4_hdr_offset, u32 desc_cnt,
				 u16 hlen, u64 addr)
{
	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
	if (is_gso) {
		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else {
		pkt_desc->pkt.type_flags = GVE_TXD_STD;
		pkt_desc->pkt.l4_csum_offset = 0;
		pkt_desc->pkt.l4_hdr_offset = 0;
	}
	pkt_desc->pkt.desc_cnt = desc_cnt;
	pkt_desc->pkt.len = cpu_to_be16(skb->len);
	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}

static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
				 struct sk_buff *skb, bool is_gso,
				 u16 len, u64 addr)
{
	seg_desc->seg.type_flags = GVE_TXD_SEG;
	if (is_gso) {
		if (skb_is_gso_v6(skb))
			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
		seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1;
		seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
	}
	seg_desc->seg.seg_len = cpu_to_be16(len);
	seg_desc->seg.seg_addr = cpu_to_be64(addr);
}

static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
				    u64 iov_offset, u64 iov_len)
{
	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
	u64 first_page = iov_offset / PAGE_SIZE;
	u64 page;

	for (page = first_page; page <= last_page; page++)
		dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
}

static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
{
	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	int payload_iov = 2;
	int copy_offset;
	u32 next_idx;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want the tcp header in the first segment;
	 * otherwise we want the linear portion of the skb (which will contain
	 * the checksum because skb->csum_start and skb->csum_offset are given
	 * relative to skb->head) in the first segment.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
			skb_headlen(skb);

	info->skb = skb;
	/* We don't want to split the header, so if necessary, pad to the end
	 * of the fifo and then put the header at the beginning of the fifo.
	 */
	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
	hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
				       &info->iov[0]);
	WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
					   &info->iov[payload_iov]);

	gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
			     1 + payload_nfrags, hlen,
			     info->iov[hdr_nfrags - 1].iov_offset);

	skb_copy_bits(skb, 0,
		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
		      hlen);
	gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
				info->iov[hdr_nfrags - 1].iov_offset,
				info->iov[hdr_nfrags - 1].iov_len);
	copy_offset = hlen;

	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
		next_idx = (tx->req + 1 + i - payload_iov) & tx->mask;
		seg_desc = &tx->desc[next_idx];

		gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
				     info->iov[i].iov_len,
				     info->iov[i].iov_offset);

		skb_copy_bits(skb, copy_offset,
			      tx->tx_fifo.base + info->iov[i].iov_offset,
			      info->iov[i].iov_len);
		gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
					info->iov[i].iov_offset,
					info->iov[i].iov_len);
		copy_offset += info->iov[i].iov_len;
	}

	return 1 + payload_nfrags;
}
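/* The copy path writes one packet descriptor at tx->req and one segment
 * descriptor per payload fragment in the following ring slots.  For a fully
 * linear non-GSO skb the whole packet is copied as the "header", so a single
 * packet descriptor is written and the function returns 1.  For a GSO skb
 * only the headers (through the TCP header) go in the first fragment; the
 * payload gets one FIFO fragment, or two if it wraps, so the function returns
 * 2 or 3.  Each copied fragment is synced to the device one full page at a
 * time because the QPL keeps one bus address per page; for example, a
 * fragment at FIFO offset 4000 with length 300 touches pages 0 and 1, and
 * both pages are synced.
 */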

static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
				  struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	int hlen, payload_nfrags, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	struct gve_tx_dma_buf *buf;
	u64 addr;
	u32 len;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want only up to the tcp header in the
	 * first segment so it can be replicated efficiently on each segment;
	 * otherwise we want the linear portion of the skb (which will contain
	 * the checksum because skb->csum_start and skb->csum_offset are given
	 * relative to skb->head) in the first segment.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
	len = skb_headlen(skb);

	info->skb = skb;

	addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx->dev, addr))) {
		tx->dma_mapping_error++;
		goto drop;
	}
	buf = &info->buf;
	dma_unmap_len_set(buf, len, len);
	dma_unmap_addr_set(buf, dma, addr);

	payload_nfrags = shinfo->nr_frags;
	if (hlen < len) {
		/* For gso the rest of the linear portion of the skb needs to
		 * be in its own descriptor.
		 */
		payload_nfrags++;
		gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
				     1 + payload_nfrags, hlen, addr);

		len -= hlen;
		addr += hlen;
		idx = (tx->req + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
	} else {
		gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
				     1 + payload_nfrags, hlen, addr);
	}

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];

		idx = (idx + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		len = skb_frag_size(frag);
		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx->dev, addr))) {
			tx->dma_mapping_error++;
			goto unmap_drop;
		}
		buf = &tx->info[idx].buf;
		tx->info[idx].skb = NULL;
		dma_unmap_len_set(buf, len, len);
		dma_unmap_addr_set(buf, dma, addr);

		gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
	}

	return 1 + payload_nfrags;

unmap_drop:
	/* Walk back over the frag mappings made so far, plus the slot(s) used
	 * for the linear portion of the skb.
	 */
	i += (payload_nfrags == shinfo->nr_frags ? 1 : 2);
	while (i--) {
		idx--;
		gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
	}
drop:
	tx->dropped_pkt++;
	return 0;
}
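/* Example layout for a GSO skb with a 1000 byte linear area, 120 bytes of
 * headers (hlen) and two page frags: the linear area is mapped once; the
 * packet descriptor at tx->req covers its first 120 bytes, the descriptor at
 * tx->req + 1 covers the remaining 880 bytes of the same mapping, and the two
 * frags get their own mappings and descriptors at tx->req + 2 and
 * tx->req + 3.  payload_nfrags is 3, so the function returns 4.
 */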

netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int nsegs;

	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
	     "skb queue index out of range");
	tx = &priv->tx[skb_get_queue_mapping(skb)];
	if (unlikely(gve_maybe_stop_tx(tx, skb))) {
		/* We need to ring the txq doorbell -- we have stopped the Tx
		 * queue for want of resources, but prior calls to gve_tx()
		 * may have added descriptors without ringing the doorbell.
		 */

		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		return NETDEV_TX_BUSY;
	}
	if (tx->raw_addressing)
		nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
	else
		nsegs = gve_tx_add_skb_copy(priv, tx, skb);

	/* If the packet is getting sent, we need to update the skb */
	if (nsegs) {
		netdev_tx_sent_queue(tx->netdev_txq, skb->len);
		skb_tx_timestamp(skb);
		tx->req += nsegs;
	} else {
		dev_kfree_skb_any(skb);
	}

	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;

	/* Give packets to NIC. Even if this packet failed to send, the
	 * doorbell might need to be rung because of xmit_more.
	 */
	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
	return NETDEV_TX_OK;
}
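/* Example: three two-descriptor packets queued back to back with xmit_more
 * set on the first two each advance tx->req by 2 without touching the
 * doorbell; the third call (xmit_more clear) writes the final tx->req once,
 * handing all six descriptors to the NIC with a single doorbell write.
 */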

#define GVE_TX_START_THRESH PAGE_SIZE

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake)
{
	struct gve_tx_buffer_state *info;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	struct sk_buff *skb;
	int i, j;
	u32 idx;

	for (j = 0; j < to_do; j++) {
		idx = tx->done & tx->mask;
		netif_info(priv, tx_done, priv->dev,
			   "[%d] %s: idx=%d (req=%u done=%u)\n",
			   tx->q_num, __func__, idx, tx->req, tx->done);
		info = &tx->info[idx];
		skb = info->skb;

		/* Unmap the buffer */
		if (tx->raw_addressing)
			gve_tx_unmap_buf(tx->dev, info);
		tx->done++;
		/* Mark as free */
		if (skb) {
			info->skb = NULL;
			bytes += skb->len;
			pkts++;
			dev_consume_skb_any(skb);
			if (tx->raw_addressing)
				continue;
			/* FIFO free */
			for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
				space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
				info->iov[i].iov_len = 0;
				info->iov[i].iov_padding = 0;
			}
		}
	}

	if (!tx->raw_addressing)
		gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);

	/* start the queue if we've stopped it */
#ifndef CONFIG_BQL
	/* Make sure that the doorbells are synced */
	smp_mb();
#endif
	if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
	    likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
		tx->wake_queue++;
		netif_tx_wake_queue(tx->netdev_txq);
	}

	return pkts;
}

__be32 gve_tx_load_event_counter(struct gve_priv *priv,
				 struct gve_tx_ring *tx)
{
	u32 counter_index = be32_to_cpu((tx->q_resources->counter_index));

	return READ_ONCE(priv->counter_array[counter_index]);
}

bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	bool repoll = false;
	u32 nic_done;
	u32 to_do;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	/* Find out how much work there is to be done */
	tx->last_nic_done = gve_tx_load_event_counter(priv, tx);
	nic_done = be32_to_cpu(tx->last_nic_done);
	if (budget > 0) {
		/* Do as much work as we have that the budget will
		 * allow
		 */
		to_do = min_t(u32, (nic_done - tx->done), budget);
		gve_clean_tx_done(priv, tx, to_do, true);
	}
	/* If we still have work we want to repoll */
	repoll |= (nic_done != tx->done);
	return repoll;
}
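/* Completion flow example: if the device's event counter reads 500 while
 * tx->done is 490, gve_tx_poll() asks gve_clean_tx_done() to reap up to 10
 * descriptors (bounded by the NAPI budget); if anything is left over, repoll
 * stays true and NAPI polls this queue again.
 */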