// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include "gve_dqo.h"
#include <net/ip.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/skbuff.h>

/* Returns true if tx_bufs are available. */
static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)
{
	int num_avail;

	if (!tx->dqo.qpl)
		return true;

	num_avail = tx->dqo.num_tx_qpl_bufs -
		(tx->dqo_tx.alloc_tx_qpl_buf_cnt -
		 tx->dqo_tx.free_tx_qpl_buf_cnt);

	if (count <= num_avail)
		return true;

	/* Update cached value from dqo_compl. */
	tx->dqo_tx.free_tx_qpl_buf_cnt =
		atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt);

	num_avail = tx->dqo.num_tx_qpl_bufs -
		(tx->dqo_tx.alloc_tx_qpl_buf_cnt -
		 tx->dqo_tx.free_tx_qpl_buf_cnt);

	return count <= num_avail;
}

static s16
gve_alloc_tx_qpl_buf(struct gve_tx_ring *tx)
{
	s16 index;

	index = tx->dqo_tx.free_tx_qpl_buf_head;

	/* No TX buffers available, try to steal the list from the
	 * completion handler.
	 */
	if (unlikely(index == -1)) {
		tx->dqo_tx.free_tx_qpl_buf_head =
			atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1);
		index = tx->dqo_tx.free_tx_qpl_buf_head;

		if (unlikely(index == -1))
			return index;
	}

	/* Remove TX buf from free list */
	tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[index];

	return index;
}

static void
gve_free_tx_qpl_bufs(struct gve_tx_ring *tx,
		     struct gve_tx_pending_packet_dqo *pkt)
{
	s16 index;
	int i;

	if (!pkt->num_bufs)
		return;

	index = pkt->tx_qpl_buf_ids[0];
	/* Create a linked list of buffers to be added to the free list */
	for (i = 1; i < pkt->num_bufs; i++) {
		tx->dqo.tx_qpl_buf_next[index] = pkt->tx_qpl_buf_ids[i];
		index = pkt->tx_qpl_buf_ids[i];
	}

	while (true) {
		s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_head);

		tx->dqo.tx_qpl_buf_next[index] = old_head;
		if (atomic_cmpxchg(&tx->dqo_compl.free_tx_qpl_buf_head,
				   old_head,
				   pkt->tx_qpl_buf_ids[0]) == old_head) {
			break;
		}
	}

	atomic_add(pkt->num_bufs, &tx->dqo_compl.free_tx_qpl_buf_cnt);
	pkt->num_bufs = 0;
}
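/* Illustrative note (not from the device spec): the QPL buffer free list
 * is split between the TX path and the completion path so that neither
 * needs a lock.  The TX path pops from its private list
 * (dqo_tx.free_tx_qpl_buf_head); the completion path pushes onto an atomic
 * list (dqo_compl.free_tx_qpl_buf_head) with a cmpxchg loop.  Only when
 * the private list runs dry does the TX path steal the whole atomic list
 * in a single atomic_xchg(), e.g.:
 *
 *   TX:    private head == -1
 *   TX:    head = atomic_xchg(&compl_head, -1)   // takes 7 -> 3 -> -1
 *   compl: concurrently pushes 9 onto the (now empty) atomic list
 *
 * The xchg either observes the push or it doesn't; no entry is lost
 * either way.
 */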
/* Returns true if a gve_tx_pending_packet_dqo object is available. */
static bool gve_has_pending_packet(struct gve_tx_ring *tx)
{
	/* Check TX path's list. */
	if (tx->dqo_tx.free_pending_packets != -1)
		return true;

	/* Check completion handler's list. */
	if (atomic_read_acquire(&tx->dqo_compl.free_pending_packets) != -1)
		return true;

	return false;
}

static struct gve_tx_pending_packet_dqo *
gve_alloc_pending_packet(struct gve_tx_ring *tx)
{
	struct gve_tx_pending_packet_dqo *pending_packet;
	s16 index;

	index = tx->dqo_tx.free_pending_packets;

	/* No pending_packets available, try to steal the list from the
	 * completion handler.
	 */
	if (unlikely(index == -1)) {
		tx->dqo_tx.free_pending_packets =
			atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
		index = tx->dqo_tx.free_pending_packets;

		if (unlikely(index == -1))
			return NULL;
	}

	pending_packet = &tx->dqo.pending_packets[index];

	/* Remove pending_packet from free list */
	tx->dqo_tx.free_pending_packets = pending_packet->next;
	pending_packet->state = GVE_PACKET_STATE_PENDING_DATA_COMPL;

	return pending_packet;
}

static void
gve_free_pending_packet(struct gve_tx_ring *tx,
			struct gve_tx_pending_packet_dqo *pending_packet)
{
	s16 index = pending_packet - tx->dqo.pending_packets;

	pending_packet->state = GVE_PACKET_STATE_UNALLOCATED;
	while (true) {
		s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_pending_packets);

		pending_packet->next = old_head;
		if (atomic_cmpxchg(&tx->dqo_compl.free_pending_packets,
				   old_head, index) == old_head) {
			break;
		}
	}
}
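/* Sketch of the push loop above (illustrative): to push index 5 onto an
 * atomic list whose head is currently 9,
 *
 *   old_head = 9;
 *   pending_packets[5].next = 9;
 *   cmpxchg(head: 9 -> 5) succeeds;  the list is now 5 -> 9 -> ...
 *
 * If another CPU won the race and the head is no longer 9, the cmpxchg
 * fails and the loop retries against the new head.
 */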
/* gve_tx_clean_pending_packets - Cleans up all pending tx requests and
 * buffers.
 */
static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
{
	int i;

	for (i = 0; i < tx->dqo.num_pending_packets; i++) {
		struct gve_tx_pending_packet_dqo *cur_state =
			&tx->dqo.pending_packets[i];
		int j;

		for (j = 0; j < cur_state->num_bufs; j++) {
			if (j == 0) {
				dma_unmap_single(tx->dev,
						 dma_unmap_addr(cur_state, dma[j]),
						 dma_unmap_len(cur_state, len[j]),
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(tx->dev,
					       dma_unmap_addr(cur_state, dma[j]),
					       dma_unmap_len(cur_state, len[j]),
					       DMA_TO_DEVICE);
			}
		}
		if (cur_state->skb) {
			dev_consume_skb_any(cur_state->skb);
			cur_state->skb = NULL;
		}
	}
}

static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	size_t bytes;

	gve_tx_remove_from_block(priv, idx);

	if (tx->q_resources) {
		dma_free_coherent(hdev, sizeof(*tx->q_resources),
				  tx->q_resources, tx->q_resources_bus);
		tx->q_resources = NULL;
	}

	if (tx->dqo.compl_ring) {
		bytes = sizeof(tx->dqo.compl_ring[0]) *
			(tx->dqo.complq_mask + 1);
		dma_free_coherent(hdev, bytes, tx->dqo.compl_ring,
				  tx->complq_bus_dqo);
		tx->dqo.compl_ring = NULL;
	}

	if (tx->dqo.tx_ring) {
		bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
		dma_free_coherent(hdev, bytes, tx->dqo.tx_ring, tx->bus);
		tx->dqo.tx_ring = NULL;
	}

	kvfree(tx->dqo.pending_packets);
	tx->dqo.pending_packets = NULL;

	kvfree(tx->dqo.tx_qpl_buf_next);
	tx->dqo.tx_qpl_buf_next = NULL;

	if (tx->dqo.qpl) {
		gve_unassign_qpl(priv, tx->dqo.qpl->id);
		tx->dqo.qpl = NULL;
	}

	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}

static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx)
{
	int num_tx_qpl_bufs = GVE_TX_BUFS_PER_PAGE_DQO *
		tx->dqo.qpl->num_entries;
	int i;

	tx->dqo.tx_qpl_buf_next = kvcalloc(num_tx_qpl_bufs,
					   sizeof(tx->dqo.tx_qpl_buf_next[0]),
					   GFP_KERNEL);
	if (!tx->dqo.tx_qpl_buf_next)
		return -ENOMEM;

	tx->dqo.num_tx_qpl_bufs = num_tx_qpl_bufs;

	/* Generate free TX buf list */
	for (i = 0; i < num_tx_qpl_bufs - 1; i++)
		tx->dqo.tx_qpl_buf_next[i] = i + 1;
	tx->dqo.tx_qpl_buf_next[num_tx_qpl_bufs - 1] = -1;

	atomic_set_release(&tx->dqo_compl.free_tx_qpl_buf_head, -1);
	return 0;
}

static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	int num_pending_packets;
	size_t bytes;
	int i;

	memset(tx, 0, sizeof(*tx));
	tx->q_num = idx;
	tx->dev = &priv->pdev->dev;
	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
	atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);

	/* Queue sizes must be a power of 2 */
	tx->mask = priv->tx_desc_cnt - 1;
	tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
		priv->options_dqo_rda.tx_comp_ring_entries - 1 :
		tx->mask;

	/* The max number of pending packets determines the maximum number of
	 * descriptors which may be written to the completion queue.
	 *
	 * We must set the number small enough to make sure we never overrun
	 * the completion queue.
	 */
	num_pending_packets = tx->dqo.complq_mask + 1;

	/* Reserve space for descriptor completions, which will be reported at
	 * most every GVE_TX_MIN_RE_INTERVAL packets.
	 */
	num_pending_packets -=
		(tx->dqo.complq_mask + 1) / GVE_TX_MIN_RE_INTERVAL;

	/* Each packet may have at most 2 buffer completions if it receives
	 * both a miss and reinjection completion.
	 */
	num_pending_packets /= 2;

	tx->dqo.num_pending_packets = min_t(int, num_pending_packets, S16_MAX);
	tx->dqo.pending_packets = kvcalloc(tx->dqo.num_pending_packets,
					   sizeof(tx->dqo.pending_packets[0]),
					   GFP_KERNEL);
	if (!tx->dqo.pending_packets)
		goto err;

	/* Set up linked list of pending packets */
	for (i = 0; i < tx->dqo.num_pending_packets - 1; i++)
		tx->dqo.pending_packets[i].next = i + 1;

	tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1;
	atomic_set_release(&tx->dqo_compl.free_pending_packets, -1);
	tx->dqo_compl.miss_completions.head = -1;
	tx->dqo_compl.miss_completions.tail = -1;
	tx->dqo_compl.timed_out_completions.head = -1;
	tx->dqo_compl.timed_out_completions.tail = -1;

	bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
	tx->dqo.tx_ring = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
	if (!tx->dqo.tx_ring)
		goto err;

	bytes = sizeof(tx->dqo.compl_ring[0]) * (tx->dqo.complq_mask + 1);
	tx->dqo.compl_ring = dma_alloc_coherent(hdev, bytes,
						&tx->complq_bus_dqo,
						GFP_KERNEL);
	if (!tx->dqo.compl_ring)
		goto err;

	tx->q_resources = dma_alloc_coherent(hdev, sizeof(*tx->q_resources),
					     &tx->q_resources_bus, GFP_KERNEL);
	if (!tx->q_resources)
		goto err;

	if (gve_is_qpl(priv)) {
		tx->dqo.qpl = gve_assign_tx_qpl(priv, idx);
		if (!tx->dqo.qpl)
			goto err;

		if (gve_tx_qpl_buf_init(tx))
			goto err;
	}

	gve_tx_add_to_block(priv, idx);

	return 0;

err:
	gve_tx_free_ring_dqo(priv, idx);
	return -ENOMEM;
}
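/* Worked example of the pending-packet budget above (illustrative numbers,
 * assuming a 1024-entry completion queue and GVE_TX_MIN_RE_INTERVAL == 32):
 *
 *   num_pending_packets = 1024          // one slot per completion
 *                       - 1024 / 32     // reserve descriptor completions
 *                       = 992
 *   992 / 2 = 496                       // miss + reinjection per packet
 *
 * so at most 496 packets are ever outstanding, and the completion queue
 * cannot overflow even in the worst case.
 */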
int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
{
	int err = 0;
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		err = gve_tx_alloc_ring_dqo(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc tx ring=%d: err=%d\n",
				  i, err);
			goto err;
		}
	}

	return 0;

err:
	for (i--; i >= 0; i--)
		gve_tx_free_ring_dqo(priv, i);

	return err;
}

void gve_tx_free_rings_dqo(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		struct gve_tx_ring *tx = &priv->tx[i];

		gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
		netdev_tx_reset_queue(tx->netdev_txq);
		gve_tx_clean_pending_packets(tx);

		gve_tx_free_ring_dqo(priv, i);
	}
}

/* Returns the number of slots available in the ring */
static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
{
	u32 num_used = (tx->dqo_tx.tail - tx->dqo_tx.head) & tx->mask;

	return tx->mask - num_used;
}

static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx,
				       int desc_count, int buf_count)
{
	return gve_has_pending_packet(tx) &&
	       num_avail_tx_slots(tx) >= desc_count &&
	       gve_has_free_tx_qpl_bufs(tx, buf_count);
}

/* Stops the queue if there are fewer than 'desc_count' descriptors or
 * 'buf_count' QPL buffers available.
 * Return: 0 if stop is not required.
 */
static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
				 int desc_count, int buf_count)
{
	if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
		return 0;

	/* Update cached TX head pointer */
	tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);

	if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
		return 0;

	/* No space, so stop the queue */
	tx->stop_queue++;
	netif_tx_stop_queue(tx->netdev_txq);

	/* Sync with restarting queue in `gve_tx_poll_dqo()` */
	mb();

	/* After stopping queue, check if we can transmit again in order to
	 * avoid TOCTOU bug.
	 */
	tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);

	if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
		return -EBUSY;

	netif_tx_start_queue(tx->netdev_txq);
	tx->wake_queue++;
	return 0;
}

static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
					struct gve_tx_metadata_dqo *metadata)
{
	memset(metadata, 0, sizeof(*metadata));
	metadata->version = GVE_TX_METADATA_VERSION_DQO;

	if (skb->l4_hash) {
		u16 path_hash = skb->hash ^ (skb->hash >> 16);

		path_hash &= (1 << 15) - 1;
		if (unlikely(path_hash == 0))
			path_hash = ~path_hash;

		metadata->path_hash = path_hash;
	}
}

static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
				     struct sk_buff *skb, u32 len, u64 addr,
				     s16 compl_tag, bool eop, bool is_gso)
{
	const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;

	while (len > 0) {
		struct gve_tx_pkt_desc_dqo *desc =
			&tx->dqo.tx_ring[*desc_idx].pkt;
		u32 cur_len = min_t(u32, len, GVE_TX_MAX_BUF_SIZE_DQO);
		bool cur_eop = eop && cur_len == len;

		*desc = (struct gve_tx_pkt_desc_dqo){
			.buf_addr = cpu_to_le64(addr),
			.dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
			.end_of_packet = cur_eop,
			.checksum_offload_enable = checksum_offload_en,
			.compl_tag = cpu_to_le16(compl_tag),
			.buf_size = cur_len,
		};

		addr += cur_len;
		len -= cur_len;
		*desc_idx = (*desc_idx + 1) & tx->mask;
	}
}
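/* Illustrative example of the split above: if the per-descriptor cap
 * GVE_TX_MAX_BUF_SIZE_DQO were 16KB, a 20KB buffer mapped at address A
 * would become two descriptors:
 *
 *   desc[0]: buf_addr = A,         buf_size = 16KB, end_of_packet = 0
 *   desc[1]: buf_addr = A + 16KB,  buf_size = 4KB,  end_of_packet = eop
 *
 * Only the final chunk of the final buffer carries the EOP bit.
 */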
/* Validates and prepares `skb` for TSO.
 *
 * Returns header length, or < 0 if invalid.
 */
static int gve_prep_tso(struct sk_buff *skb)
{
	struct tcphdr *tcp;
	int header_len;
	u32 paylen;
	int err;

	/* Note: HW requires MSS (gso_size) to be <= 9728 and the total length
	 * of the TSO to be <= 262143.
	 *
	 * However, we don't validate these because:
	 * - Hypervisor enforces a limit of 9K MTU
	 * - Kernel will not produce a TSO larger than 64k
	 */

	if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
		return -1;

	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		return -EINVAL;

	/* Needed because we will modify header. */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	tcp = tcp_hdr(skb);
	paylen = skb->len - skb_transport_offset(skb);
	csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
	header_len = skb_tcp_all_headers(skb);

	if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
		return -EINVAL;

	return header_len;
}
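/* Note on the csum_replace_by_diff() call above (illustrative): for
 * CHECKSUM_PARTIAL TSO skbs the stack seeds tcp->check with a
 * pseudo-header checksum whose length term covers the entire TSO payload.
 * Removing htonl(paylen) leaves a pseudo-header checksum without the
 * length, so the device can fold the correct per-segment length back in
 * for each generated segment, e.g. 1448 per segment for a 64KB TSO with
 * an MSS of 1448.
 */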
static void gve_tx_fill_tso_ctx_desc(struct gve_tx_tso_context_desc_dqo *desc,
				     const struct sk_buff *skb,
				     const struct gve_tx_metadata_dqo *metadata,
				     int header_len)
{
	*desc = (struct gve_tx_tso_context_desc_dqo){
		.header_len = header_len,
		.cmd_dtype = {
			.dtype = GVE_TX_TSO_CTX_DESC_DTYPE_DQO,
			.tso = 1,
		},
		.flex0 = metadata->bytes[0],
		.flex5 = metadata->bytes[5],
		.flex6 = metadata->bytes[6],
		.flex7 = metadata->bytes[7],
		.flex8 = metadata->bytes[8],
		.flex9 = metadata->bytes[9],
		.flex10 = metadata->bytes[10],
		.flex11 = metadata->bytes[11],
	};
	desc->tso_total_len = skb->len - header_len;
	desc->mss = skb_shinfo(skb)->gso_size;
}

static void
gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
			     const struct gve_tx_metadata_dqo *metadata)
{
	*desc = (struct gve_tx_general_context_desc_dqo){
		.flex0 = metadata->bytes[0],
		.flex1 = metadata->bytes[1],
		.flex2 = metadata->bytes[2],
		.flex3 = metadata->bytes[3],
		.flex4 = metadata->bytes[4],
		.flex5 = metadata->bytes[5],
		.flex6 = metadata->bytes[6],
		.flex7 = metadata->bytes[7],
		.flex8 = metadata->bytes[8],
		.flex9 = metadata->bytes[9],
		.flex10 = metadata->bytes[10],
		.flex11 = metadata->bytes[11],
		.cmd_dtype = {.dtype = GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO},
	};
}

static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
				      struct sk_buff *skb,
				      struct gve_tx_pending_packet_dqo *pkt,
				      s16 completion_tag,
				      u32 *desc_idx,
				      bool is_gso)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	/* Note: HW requires that the size of a non-TSO packet be within the
	 * range of [17, 9728].
	 *
	 * We don't double check because
	 * - We limited `netdev->min_mtu` to ETH_MIN_MTU.
	 * - Hypervisor won't allow MTU larger than 9216.
	 */

	pkt->num_bufs = 0;
	/* Map the linear portion of skb */
	{
		u32 len = skb_headlen(skb);
		dma_addr_t addr;

		addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx->dev, addr)))
			goto err;

		dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
		++pkt->num_bufs;

		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
					 completion_tag,
					 /*eop=*/shinfo->nr_frags == 0, is_gso);
	}

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];
		bool is_eop = i == (shinfo->nr_frags - 1);
		u32 len = skb_frag_size(frag);
		dma_addr_t addr;

		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx->dev, addr)))
			goto err;

		dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
		++pkt->num_bufs;

		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
					 completion_tag, is_eop, is_gso);
	}

	return 0;
err:
	for (i = 0; i < pkt->num_bufs; i++) {
		if (i == 0) {
			dma_unmap_single(tx->dev,
					 dma_unmap_addr(pkt, dma[i]),
					 dma_unmap_len(pkt, len[i]),
					 DMA_TO_DEVICE);
		} else {
			dma_unmap_page(tx->dev,
				       dma_unmap_addr(pkt, dma[i]),
				       dma_unmap_len(pkt, len[i]),
				       DMA_TO_DEVICE);
		}
	}
	pkt->num_bufs = 0;
	return -1;
}

/* Tx buffer i corresponds to
 * qpl_page_id = i / GVE_TX_BUFS_PER_PAGE_DQO
 * qpl_page_offset = (i % GVE_TX_BUFS_PER_PAGE_DQO) * GVE_TX_BUF_SIZE_DQO
 */
static void gve_tx_buf_get_addr(struct gve_tx_ring *tx,
				s16 index,
				void **va, dma_addr_t *dma_addr)
{
	int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO);
	int offset = (index & (GVE_TX_BUFS_PER_PAGE_DQO - 1)) << GVE_TX_BUF_SHIFT_DQO;

	*va = page_address(tx->dqo.qpl->pages[page_id]) + offset;
	*dma_addr = tx->dqo.qpl->page_buses[page_id] + offset;
}
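/* Worked example of the mapping above (illustrative, assuming 4K pages and
 * GVE_TX_BUF_SHIFT_DQO == 11, i.e. 2K buffers, two per page):
 *
 *   index = 5
 *   page_id = 5 >> (12 - 11)      = 2
 *   offset  = (5 & (2 - 1)) << 11 = 2048
 *
 * i.e. buffer 5 is the second half of the third QPL page, matching
 * 5 / 2 == 2 and (5 % 2) * 2048 == 2048 from the comment above.
 */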
static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
				   struct sk_buff *skb,
				   struct gve_tx_pending_packet_dqo *pkt,
				   s16 completion_tag,
				   u32 *desc_idx,
				   bool is_gso)
{
	u32 copy_offset = 0;
	dma_addr_t dma_addr;
	u32 copy_len;
	s16 index;
	void *va;

	/* Break the packet into buffer size chunks */
	pkt->num_bufs = 0;
	while (copy_offset < skb->len) {
		index = gve_alloc_tx_qpl_buf(tx);
		if (unlikely(index == -1))
			goto err;

		gve_tx_buf_get_addr(tx, index, &va, &dma_addr);
		copy_len = min_t(u32, GVE_TX_BUF_SIZE_DQO,
				 skb->len - copy_offset);
		skb_copy_bits(skb, copy_offset, va, copy_len);

		copy_offset += copy_len;
		dma_sync_single_for_device(tx->dev, dma_addr,
					   copy_len, DMA_TO_DEVICE);
		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb,
					 copy_len,
					 dma_addr,
					 completion_tag,
					 copy_offset == skb->len,
					 is_gso);

		pkt->tx_qpl_buf_ids[pkt->num_bufs] = index;
		++tx->dqo_tx.alloc_tx_qpl_buf_cnt;
		++pkt->num_bufs;
	}

	return 0;
err:
	/* Should not be here if gve_has_free_tx_qpl_bufs() check is correct */
	gve_free_tx_qpl_bufs(tx, pkt);
	return -ENOMEM;
}

/* Returns 0 on success, or < 0 on error.
 *
 * Before this function is called, the caller must ensure
 * gve_has_pending_packet(tx) returns true.
 */
static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
			      struct sk_buff *skb)
{
	const bool is_gso = skb_is_gso(skb);
	u32 desc_idx = tx->dqo_tx.tail;
	struct gve_tx_pending_packet_dqo *pkt;
	struct gve_tx_metadata_dqo metadata;
	s16 completion_tag;

	pkt = gve_alloc_pending_packet(tx);
	pkt->skb = skb;
	completion_tag = pkt - tx->dqo.pending_packets;

	gve_extract_tx_metadata_dqo(skb, &metadata);
	if (is_gso) {
		int header_len = gve_prep_tso(skb);

		if (unlikely(header_len < 0))
			goto err;

		gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx,
					 skb, &metadata, header_len);
		desc_idx = (desc_idx + 1) & tx->mask;
	}

	gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx,
				     &metadata);
	desc_idx = (desc_idx + 1) & tx->mask;

	if (tx->dqo.qpl) {
		if (gve_tx_add_skb_copy_dqo(tx, skb, pkt,
					    completion_tag,
					    &desc_idx, is_gso))
			goto err;
	} else {
		if (gve_tx_add_skb_no_copy_dqo(tx, skb, pkt,
					       completion_tag,
					       &desc_idx, is_gso))
			goto err;
	}

	tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;

	/* Commit the changes to our state */
	tx->dqo_tx.tail = desc_idx;

	/* Request a descriptor completion on the last descriptor of the
	 * packet if we are allowed to by the HW enforced interval.
	 */
	{
		u32 last_desc_idx = (desc_idx - 1) & tx->mask;
		u32 last_report_event_interval =
			(last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;

		if (unlikely(last_report_event_interval >=
			     GVE_TX_MIN_RE_INTERVAL)) {
			tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
			tx->dqo_tx.last_re_idx = last_desc_idx;
		}
	}

	return 0;

err:
	pkt->skb = NULL;
	gve_free_pending_packet(tx, pkt);

	return -1;
}

static int gve_num_descs_per_buf(size_t size)
{
	return DIV_ROUND_UP(size, GVE_TX_MAX_BUF_SIZE_DQO);
}

static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	int num_descs;
	int i;

	num_descs = gve_num_descs_per_buf(skb_headlen(skb));

	for (i = 0; i < shinfo->nr_frags; i++) {
		unsigned int frag_size = skb_frag_size(&shinfo->frags[i]);

		num_descs += gve_num_descs_per_buf(frag_size);
	}

	return num_descs;
}
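/* Illustrative example for the helpers above: an skb with a 1KB linear
 * area and two frags of 3KB and 20KB, again assuming a 16KB
 * GVE_TX_MAX_BUF_SIZE_DQO for illustration, needs
 *
 *   DIV_ROUND_UP(1K, 16K) + DIV_ROUND_UP(3K, 16K) + DIV_ROUND_UP(20K, 16K)
 *   = 1 + 1 + 2 = 4
 *
 * data descriptors; only buffers larger than the per-descriptor cap are
 * split across multiple descriptors.
 */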
/* Returns true if HW is capable of sending TSO represented by `skb`.
 *
 * Each segment must not span more than GVE_TX_MAX_DATA_DESCS buffers.
 * - The header is counted as one buffer for every single segment.
 * - A buffer which is split between two segments is counted for both.
 * - If a buffer contains both header and payload, it is counted as two
 *   buffers.
 */
static bool gve_can_send_tso(const struct sk_buff *skb)
{
	const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const int header_len = skb_tcp_all_headers(skb);
	const int gso_size = shinfo->gso_size;
	int cur_seg_num_bufs;
	int prev_frag_size;
	int cur_seg_size;
	int i;

	cur_seg_size = skb_headlen(skb) - header_len;
	prev_frag_size = skb_headlen(skb);
	cur_seg_num_bufs = cur_seg_size > 0;

	for (i = 0; i < shinfo->nr_frags; i++) {
		if (cur_seg_size >= gso_size) {
			cur_seg_size %= gso_size;
			cur_seg_num_bufs = cur_seg_size > 0;

			if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
				int prev_frag_remain = prev_frag_size %
					GVE_TX_MAX_BUF_SIZE_DQO;

				/* If the last descriptor of the previous frag
				 * is less than cur_seg_size, the segment will
				 * span two descriptors in the previous frag.
				 * Since max gso size (9728) is less than
				 * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
				 * for the segment to span more than two
				 * descriptors.
				 */
				if (prev_frag_remain &&
				    cur_seg_size > prev_frag_remain)
					cur_seg_num_bufs++;
			}
		}

		if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
			return false;

		prev_frag_size = skb_frag_size(&shinfo->frags[i]);
		cur_seg_size += prev_frag_size;
	}

	return true;
}
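/* Illustrative walk-through of the check above, assuming
 * GVE_TX_MAX_DATA_DESCS were 10 (so max_bufs_per_seg == 9, matching the
 * "9 SKB frags" note in gve_try_tx_skb() below) and gso_size == 1448 with
 * no payload in the linear area:
 *
 *   - A 1448-byte segment built from nine 161-byte frags fits: the header
 *     descriptor plus 9 payload buffers.
 *   - A 1448-byte segment spread over ten 150-byte frags does not:
 *     cur_seg_num_bufs reaches 10 > 9 and the function returns false,
 *     after which the caller falls back to skb_linearize().
 */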
/* Attempt to transmit specified SKB.
 *
 * Returns 0 if the SKB was transmitted or dropped.
 * Returns -1 if there is not currently enough space to transmit the SKB.
 */
static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
			  struct sk_buff *skb)
{
	int num_buffer_descs;
	int total_num_descs;

	if (tx->dqo.qpl) {
		if (skb_is_gso(skb))
			if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
				goto drop;

		/* We do not need to verify the number of buffers used per
		 * packet or per segment in case of TSO, as with 2K size
		 * buffers none of the TX packet rules would be violated.
		 *
		 * gve_can_send_tso() checks that each TCP segment of gso_size
		 * is not distributed over more than 9 SKB frags.
		 */
		num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO);
	} else {
		if (skb_is_gso(skb)) {
			/* If TSO doesn't meet HW requirements, attempt to
			 * linearize the packet.
			 */
			if (unlikely(!gve_can_send_tso(skb) &&
				     skb_linearize(skb) < 0)) {
				net_err_ratelimited("%s: Failed to transmit TSO packet\n",
						    priv->dev->name);
				goto drop;
			}

			if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
				goto drop;

			num_buffer_descs = gve_num_buffer_descs_needed(skb);
		} else {
			num_buffer_descs = gve_num_buffer_descs_needed(skb);

			if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
				if (unlikely(skb_linearize(skb) < 0))
					goto drop;

				num_buffer_descs = 1;
			}
		}
	}

	/* Metadata + (optional TSO) + data descriptors. */
	total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
	if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
					   GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP,
					   num_buffer_descs))) {
		return -1;
	}

	if (unlikely(gve_tx_add_skb_dqo(tx, skb) < 0))
		goto drop;

	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
	skb_tx_timestamp(skb);
	return 0;

drop:
	tx->dropped_pkt++;
	dev_kfree_skb_any(skb);
	return 0;
}

/* Transmit a given skb and ring the doorbell. */
netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;

	tx = &priv->tx[skb_get_queue_mapping(skb)];
	if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) {
		/* We need to ring the txq doorbell -- we have stopped the Tx
		 * queue for want of resources, but prior calls to gve_tx()
		 * may have added descriptors without ringing the doorbell.
		 */
		gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
		return NETDEV_TX_BUSY;
	}

	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;

	gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
	return NETDEV_TX_OK;
}
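/* Doorbell batching above (illustrative): when the stack signals
 * netdev_xmit_more(), more skbs are already queued for this txq, so the
 * MMIO doorbell write is deferred and a later call rings it once for the
 * whole batch.  The netif_xmit_stopped() check keeps the final
 * descriptors from being stranded: if the queue was just stopped, no
 * later call may arrive, so the doorbell must be rung now.
 */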
static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
			struct gve_tx_pending_packet_dqo *pending_packet)
{
	s16 old_tail, index;

	index = pending_packet - tx->dqo.pending_packets;
	old_tail = list->tail;
	list->tail = index;
	if (old_tail == -1)
		list->head = index;
	else
		tx->dqo.pending_packets[old_tail].next = index;

	pending_packet->next = -1;
	pending_packet->prev = old_tail;
}

static void remove_from_list(struct gve_tx_ring *tx,
			     struct gve_index_list *list,
			     struct gve_tx_pending_packet_dqo *pkt)
{
	s16 prev_index, next_index;

	prev_index = pkt->prev;
	next_index = pkt->next;

	if (prev_index == -1) {
		/* Node is head */
		list->head = next_index;
	} else {
		tx->dqo.pending_packets[prev_index].next = next_index;
	}
	if (next_index == -1) {
		/* Node is tail */
		list->tail = prev_index;
	} else {
		tx->dqo.pending_packets[next_index].prev = prev_index;
	}
}

static void gve_unmap_packet(struct device *dev,
			     struct gve_tx_pending_packet_dqo *pkt)
{
	int i;

	/* SKB linear portion is guaranteed to be mapped */
	dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
			 dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
	for (i = 1; i < pkt->num_bufs; i++) {
		dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
			       dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
	}
	pkt->num_bufs = 0;
}

/* Completion types and expected behavior:
 * No Miss compl + Packet compl = Packet completed normally.
 * Miss compl + Re-inject compl = Packet completed normally.
 * No Miss compl + Re-inject compl = Skipped, i.e. packet not completed.
 * Miss compl + Packet compl = Skipped, i.e. packet not completed.
 */
static void gve_handle_packet_completion(struct gve_priv *priv,
					 struct gve_tx_ring *tx, bool is_napi,
					 u16 compl_tag, u64 *bytes, u64 *pkts,
					 bool is_reinjection)
{
	struct gve_tx_pending_packet_dqo *pending_packet;

	if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
		net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
				    priv->dev->name, (int)compl_tag);
		return;
	}

	pending_packet = &tx->dqo.pending_packets[compl_tag];

	if (unlikely(is_reinjection)) {
		if (unlikely(pending_packet->state ==
			     GVE_PACKET_STATE_TIMED_OUT_COMPL)) {
			net_err_ratelimited("%s: Re-injection completion: %d received after timeout.\n",
					    priv->dev->name, (int)compl_tag);
			/* Packet was already completed as a result of timeout,
			 * so just remove from list and free pending packet.
			 */
			remove_from_list(tx,
					 &tx->dqo_compl.timed_out_completions,
					 pending_packet);
			gve_free_pending_packet(tx, pending_packet);
			return;
		}
		if (unlikely(pending_packet->state !=
			     GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) {
			/* No outstanding miss completion but packet allocated
			 * implies packet receives a re-injection completion
			 * without a prior miss completion. Return without
			 * completing the packet.
			 */
			net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n",
					    priv->dev->name, (int)compl_tag);
			return;
		}
		remove_from_list(tx, &tx->dqo_compl.miss_completions,
				 pending_packet);
	} else {
		/* Packet is allocated but not a pending data completion. */
		if (unlikely(pending_packet->state !=
			     GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
			net_err_ratelimited("%s: No pending data completion: %d\n",
					    priv->dev->name, (int)compl_tag);
			return;
		}
	}
	tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs;
	if (tx->dqo.qpl)
		gve_free_tx_qpl_bufs(tx, pending_packet);
	else
		gve_unmap_packet(tx->dev, pending_packet);

	*bytes += pending_packet->skb->len;
	(*pkts)++;
	napi_consume_skb(pending_packet->skb, is_napi);
	pending_packet->skb = NULL;
	gve_free_pending_packet(tx, pending_packet);
}

static void gve_handle_miss_completion(struct gve_priv *priv,
				       struct gve_tx_ring *tx, u16 compl_tag,
				       u64 *bytes, u64 *pkts)
{
	struct gve_tx_pending_packet_dqo *pending_packet;

	if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
		net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
				    priv->dev->name, (int)compl_tag);
		return;
	}

	pending_packet = &tx->dqo.pending_packets[compl_tag];
	if (unlikely(pending_packet->state !=
		     GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
		net_err_ratelimited("%s: Unexpected packet state: %d for completion tag: %d\n",
				    priv->dev->name, (int)pending_packet->state,
				    (int)compl_tag);
		return;
	}

	pending_packet->state = GVE_PACKET_STATE_PENDING_REINJECT_COMPL;
	/* jiffies can wrap around but time comparisons can handle overflows. */
	pending_packet->timeout_jiffies =
		jiffies +
		msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
				 MSEC_PER_SEC);
	add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);

	*bytes += pending_packet->skb->len;
	(*pkts)++;
}
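/* Illustrative timeline of the miss-completion handling implemented above
 * and in remove_miss_completions() below:
 *
 *   t0: miss compl arrives  -> state = PENDING_REINJECT_COMPL,
 *                              packet added to miss_completions
 *   t1: reinject compl      -> packet completed normally (good path)
 *   -or-
 *   t1': timeout elapses    -> skb freed, state = TIMED_OUT_COMPL,
 *                              moved to timed_out_completions; the
 *                              completion tag stays allocated so a late
 *                              reinjection completion can still be matched.
 */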
static void remove_miss_completions(struct gve_priv *priv,
				    struct gve_tx_ring *tx)
{
	struct gve_tx_pending_packet_dqo *pending_packet;
	s16 next_index;

	next_index = tx->dqo_compl.miss_completions.head;
	while (next_index != -1) {
		pending_packet = &tx->dqo.pending_packets[next_index];
		next_index = pending_packet->next;
		/* Break early because packets should timeout in order. */
		if (time_is_after_jiffies(pending_packet->timeout_jiffies))
			break;

		remove_from_list(tx, &tx->dqo_compl.miss_completions,
				 pending_packet);
		/* Unmap/free TX buffers and free the skb, but do not
		 * deallocate the pending packet, i.e. the completion tag is
		 * not freed, so that the driver can take appropriate action
		 * if a corresponding valid completion is received later.
		 */
		if (tx->dqo.qpl)
			gve_free_tx_qpl_bufs(tx, pending_packet);
		else
			gve_unmap_packet(tx->dev, pending_packet);

		/* This indicates the packet was dropped. */
		dev_kfree_skb_any(pending_packet->skb);
		pending_packet->skb = NULL;
		tx->dropped_pkt++;
		net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
				    priv->dev->name,
				    (int)(pending_packet - tx->dqo.pending_packets));

		pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
		pending_packet->timeout_jiffies =
			jiffies +
			msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
					 MSEC_PER_SEC);
		/* Maintain pending packet in another list so the packet can be
		 * unallocated at a later time.
		 */
		add_to_list(tx, &tx->dqo_compl.timed_out_completions,
			    pending_packet);
	}
}

static void remove_timed_out_completions(struct gve_priv *priv,
					 struct gve_tx_ring *tx)
{
	struct gve_tx_pending_packet_dqo *pending_packet;
	s16 next_index;

	next_index = tx->dqo_compl.timed_out_completions.head;
	while (next_index != -1) {
		pending_packet = &tx->dqo.pending_packets[next_index];
		next_index = pending_packet->next;
		/* Break early because packets should timeout in order. */
		if (time_is_after_jiffies(pending_packet->timeout_jiffies))
			break;

		remove_from_list(tx, &tx->dqo_compl.timed_out_completions,
				 pending_packet);
		gve_free_pending_packet(tx, pending_packet);
	}
}
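/* Illustrative note on completion-ring ownership, used by
 * gve_clean_tx_done_dqo() below: the completion queue has no explicit
 * head register; a descriptor is owned by the driver only when its
 * generation bit differs from the driver's cur_gen_bit.  E.g. with an
 * 8-entry ring, once the driver consumes entry 7 it wraps head back to 0
 * and flips cur_gen_bit (`cur_gen_bit ^= head == 0`), so leftover entries
 * from the previous pass now compare equal to cur_gen_bit and are
 * correctly treated as not yet written.
 */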
int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
			  struct napi_struct *napi)
{
	u64 reinject_compl_bytes = 0;
	u64 reinject_compl_pkts = 0;
	int num_descs_cleaned = 0;
	u64 miss_compl_bytes = 0;
	u64 miss_compl_pkts = 0;
	u64 pkt_compl_bytes = 0;
	u64 pkt_compl_pkts = 0;

	/* Limit in order to avoid blocking for too long */
	while (!napi || pkt_compl_pkts < napi->weight) {
		struct gve_tx_compl_desc *compl_desc =
			&tx->dqo.compl_ring[tx->dqo_compl.head];
		u16 type;

		if (compl_desc->generation == tx->dqo_compl.cur_gen_bit)
			break;

		/* Prefetch the next descriptor. */
		prefetch(&tx->dqo.compl_ring[(tx->dqo_compl.head + 1) &
				tx->dqo.complq_mask]);

		/* Do not read data until we own the descriptor */
		dma_rmb();
		type = compl_desc->type;

		if (type == GVE_COMPL_TYPE_DQO_DESC) {
			/* This is the last descriptor fetched by HW plus one */
			u16 tx_head = le16_to_cpu(compl_desc->tx_head);

			atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head);
		} else if (type == GVE_COMPL_TYPE_DQO_PKT) {
			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);

			if (compl_tag & GVE_ALT_MISS_COMPL_BIT) {
				compl_tag &= ~GVE_ALT_MISS_COMPL_BIT;
				gve_handle_miss_completion(priv, tx, compl_tag,
							   &miss_compl_bytes,
							   &miss_compl_pkts);
			} else {
				gve_handle_packet_completion(priv, tx, !!napi,
							     compl_tag,
							     &pkt_compl_bytes,
							     &pkt_compl_pkts,
							     false);
			}
		} else if (type == GVE_COMPL_TYPE_DQO_MISS) {
			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);

			gve_handle_miss_completion(priv, tx, compl_tag,
						   &miss_compl_bytes,
						   &miss_compl_pkts);
		} else if (type == GVE_COMPL_TYPE_DQO_REINJECTION) {
			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);

			gve_handle_packet_completion(priv, tx, !!napi,
						     compl_tag,
						     &reinject_compl_bytes,
						     &reinject_compl_pkts,
						     true);
		}

		tx->dqo_compl.head =
			(tx->dqo_compl.head + 1) & tx->dqo.complq_mask;
		/* Flip the generation bit when we wrap around */
		tx->dqo_compl.cur_gen_bit ^= tx->dqo_compl.head == 0;
		num_descs_cleaned++;
	}

	netdev_tx_completed_queue(tx->netdev_txq,
				  pkt_compl_pkts + miss_compl_pkts,
				  pkt_compl_bytes + miss_compl_bytes);

	remove_miss_completions(priv, tx);
	remove_timed_out_completions(priv, tx);

	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
	tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
	u64_stats_update_end(&tx->statss);
	return num_descs_cleaned;
}

bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
{
	struct gve_tx_compl_desc *compl_desc;
	struct gve_tx_ring *tx = block->tx;
	struct gve_priv *priv = block->priv;

	if (do_clean) {
		int num_descs_cleaned = gve_clean_tx_done_dqo(priv, tx,
							      &block->napi);

		/* Sync with queue being stopped in `gve_maybe_stop_tx_dqo()` */
		mb();

		if (netif_tx_queue_stopped(tx->netdev_txq) &&
		    num_descs_cleaned > 0) {
			tx->wake_queue++;
			netif_tx_wake_queue(tx->netdev_txq);
		}
	}

	/* Return true if we still have work. */
	compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
	return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
}