/*
 * QEMU TX packets abstractions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "net_tx_pkt.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "net/net.h"
#include "hw/pci/pci.h"

enum {
    NET_TX_PKT_VHDR_FRAG = 0,
    NET_TX_PKT_L2HDR_FRAG,
    NET_TX_PKT_L3HDR_FRAG,
    NET_TX_PKT_PL_START_FRAG
};

/* TX packet private context */
struct NetTxPkt {
    PCIDevice *pci_dev;

    struct virtio_net_hdr virt_hdr;
    bool has_virt_hdr;

    struct iovec *raw;
    uint32_t raw_frags;
    uint32_t max_raw_frags;

    struct iovec *vec;

    uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN];
    uint8_t l3_hdr[ETH_MAX_IP_DGRAM_LEN];

    uint32_t payload_len;

    uint32_t payload_frags;
    uint32_t max_payload_frags;

    uint16_t hdr_len;
    eth_pkt_types_e packet_type;
    uint8_t l4proto;

    bool is_loopback;
};

void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
    uint32_t max_frags, bool has_virt_hdr)
{
    struct NetTxPkt *p = g_malloc0(sizeof *p);

    p->pci_dev = pci_dev;

    p->vec = g_malloc((sizeof *p->vec) *
        (max_frags + NET_TX_PKT_PL_START_FRAG));

    p->raw = g_malloc((sizeof *p->raw) * max_frags);

    p->max_payload_frags = max_frags;
    p->max_raw_frags = max_frags;
    p->has_virt_hdr = has_virt_hdr;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_len =
        p->has_virt_hdr ? sizeof p->virt_hdr : 0;
    p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr;
    p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr;

    *pkt = p;
}

void net_tx_pkt_uninit(struct NetTxPkt *pkt)
{
    if (pkt) {
        g_free(pkt->vec);
        g_free(pkt->raw);
        g_free(pkt);
    }
}
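/*
 * Typical call sequence (an illustrative sketch only; the exact flow is
 * device-specific, see the in-tree callers such as the vmxnet3 emulation):
 *
 *     net_tx_pkt_init(&pkt, pci_dev, max_frags, has_vhdr);
 *     ...for each packet:
 *     net_tx_pkt_add_raw_fragment(pkt, desc_pa, desc_len);  (per descriptor)
 *     net_tx_pkt_parse(pkt);
 *     net_tx_pkt_build_vheader(pkt, tso_enable, csum_enable, gso_size);
 *     net_tx_pkt_send(pkt, nc);
 *     net_tx_pkt_reset(pkt);
 *     ...on shutdown:
 *     net_tx_pkt_uninit(pkt);
 */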
void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt)
{
    uint16_t csum;
    struct ip_header *ip_hdr;

    assert(pkt);

    ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    ip_hdr->ip_len = cpu_to_be16(pkt->payload_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);

    ip_hdr->ip_sum = 0;
    csum = net_raw_checksum((uint8_t *)ip_hdr,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);
    ip_hdr->ip_sum = cpu_to_be16(csum);
}

void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt)
{
    uint16_t csum;
    uint32_t cntr, cso;
    uint8_t gso_type;
    void *ip_hdr;

    assert(pkt);

    gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
    ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    if (pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len >
        ETH_MAX_IP_DGRAM_LEN) {
        return;
    }

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
        gso_type == VIRTIO_NET_HDR_GSO_UDP) {
        /* Calculate IP header checksum */
        net_tx_pkt_update_ip_hdr_checksum(pkt);

        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip4_pseudo_hdr_csum(ip_hdr, pkt->payload_len, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else if (gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip6_pseudo_hdr_csum(ip_hdr, pkt->payload_len,
            IP_PROTO_TCP, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else {
        return;
    }

    iov_from_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
        pkt->virt_hdr.csum_offset, &csum, sizeof(csum));
}

static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt)
{
    pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
}

static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt)
{
    struct iovec *l2_hdr, *l3_hdr;
    size_t bytes_read;
    size_t full_ip6hdr_len;
    uint16_t l3_proto;

    assert(pkt);

    l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG];

    bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base,
                            ETH_MAX_L2_HDR_LEN);
    if (bytes_read < sizeof(struct eth_header)) {
        l2_hdr->iov_len = 0;
        return false;
    }

    l2_hdr->iov_len = sizeof(struct eth_header);
    switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) {
    case ETH_P_VLAN:
        l2_hdr->iov_len += sizeof(struct vlan_header);
        break;
    case ETH_P_DVLAN:
        l2_hdr->iov_len += 2 * sizeof(struct vlan_header);
        break;
    }

    if (bytes_read < l2_hdr->iov_len) {
        l2_hdr->iov_len = 0;
        l3_hdr->iov_len = 0;
        pkt->packet_type = ETH_PKT_UCAST;
        return false;
    } else {
        l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base);
        pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base);
    }

    l3_proto = eth_get_l3_proto(l2_hdr, 1, l2_hdr->iov_len);

    switch (l3_proto) {
    case ETH_P_IP:
        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, sizeof(struct ip_header));

        if (bytes_read < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base);

        if (l3_hdr->iov_len < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = ((struct ip_header *) l3_hdr->iov_base)->ip_p;

        if (IP_HDR_GET_LEN(l3_hdr->iov_base) != sizeof(struct ip_header)) {
            /* copy optional IPv4 header data, if any */
            bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags,
                                    l2_hdr->iov_len + sizeof(struct ip_header),
                                    l3_hdr->iov_base + sizeof(struct ip_header),
                                    l3_hdr->iov_len - sizeof(struct ip_header));
            if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) {
                l3_hdr->iov_len = 0;
                return false;
            }
        }

        break;

    case ETH_P_IPV6:
    {
        eth_ip6_hdr_info hdrinfo;

        if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                &hdrinfo)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = hdrinfo.l4proto;
        full_ip6hdr_len = hdrinfo.full_hdr_len;

        if (full_ip6hdr_len > ETH_MAX_IP_DGRAM_LEN) {
            l3_hdr->iov_len = 0;
            return false;
        }

        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, full_ip6hdr_len);

        if (bytes_read < full_ip6hdr_len) {
            l3_hdr->iov_len = 0;
            return false;
        } else {
            l3_hdr->iov_len = full_ip6hdr_len;
        }
        break;
    }
    default:
        l3_hdr->iov_len = 0;
        break;
    }

    net_tx_pkt_calculate_hdr_len(pkt);
    return true;
}
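/*
 * Note: iov_copy() below only fills in iovec bookkeeping (base/len shifted
 * past the parsed headers); the payload entries alias the guest buffers
 * already mapped into pkt->raw, no packet data is copied.
 */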
static void net_tx_pkt_rebuild_payload(struct NetTxPkt *pkt)
{
    pkt->payload_len = iov_size(pkt->raw, pkt->raw_frags) - pkt->hdr_len;
    pkt->payload_frags = iov_copy(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
                                  pkt->max_payload_frags,
                                  pkt->raw, pkt->raw_frags,
                                  pkt->hdr_len, pkt->payload_len);
}

bool net_tx_pkt_parse(struct NetTxPkt *pkt)
{
    if (net_tx_pkt_parse_headers(pkt)) {
        net_tx_pkt_rebuild_payload(pkt);
        return true;
    } else {
        return false;
    }
}

struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt)
{
    assert(pkt);
    return &pkt->virt_hdr;
}

static uint8_t net_tx_pkt_get_gso_type(struct NetTxPkt *pkt,
                                       bool tso_enable)
{
    uint8_t rc = VIRTIO_NET_HDR_GSO_NONE;
    uint16_t l3_proto;

    l3_proto = eth_get_l3_proto(&pkt->vec[NET_TX_PKT_L2HDR_FRAG], 1,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len);

    if (!tso_enable) {
        goto func_exit;
    }

    rc = eth_get_gso_type(l3_proto, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
                          pkt->l4proto);

func_exit:
    return rc;
}

void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
    bool csum_enable, uint32_t gso_size)
{
    struct tcp_hdr l4hdr;
    assert(pkt);

    /* csum has to be enabled if tso is */
    assert(csum_enable || !tso_enable);

    pkt->virt_hdr.gso_type = net_tx_pkt_get_gso_type(pkt, tso_enable);

    switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
    case VIRTIO_NET_HDR_GSO_NONE:
        pkt->virt_hdr.hdr_len = 0;
        pkt->virt_hdr.gso_size = 0;
        break;

    case VIRTIO_NET_HDR_GSO_UDP:
        pkt->virt_hdr.gso_size = gso_size;
        pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header);
        break;

    case VIRTIO_NET_HDR_GSO_TCPV4:
    case VIRTIO_NET_HDR_GSO_TCPV6:
        iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
                   0, &l4hdr, sizeof(l4hdr));
        pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
        pkt->virt_hdr.gso_size = gso_size;
        break;

    default:
        g_assert_not_reached();
    }

    if (csum_enable) {
        switch (pkt->l4proto) {
        case IP_PROTO_TCP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum);
            break;
        case IP_PROTO_UDP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum);
            break;
        default:
            break;
        }
    }
}
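/*
 * When net_tx_pkt_build_vheader() sets VIRTIO_NET_HDR_F_NEEDS_CSUM, the
 * consumer of the packet is expected to checksum the bytes starting at
 * csum_start and store the result at csum_start + csum_offset. If the
 * backend cannot do that, net_tx_pkt_do_sw_csum() below performs the same
 * computation in software before the packet is sent.
 */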
void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
    uint16_t vlan, uint16_t vlan_ethtype)
{
    bool is_new;
    assert(pkt);

    eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base,
        vlan, vlan_ethtype, &is_new);

    /* update l2 header length if a tag was inserted */
    if (is_new) {
        pkt->hdr_len += sizeof(struct vlan_header);
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +=
            sizeof(struct vlan_header);
    }
}

bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
    size_t len)
{
    hwaddr mapped_len = 0;
    struct iovec *ventry;
    assert(pkt);
    assert(pkt->max_raw_frags > pkt->raw_frags);

    if (!len) {
        return true;
    }

    ventry = &pkt->raw[pkt->raw_frags];
    mapped_len = len;

    ventry->iov_base = pci_dma_map(pkt->pci_dev, pa,
                                   &mapped_len, DMA_DIRECTION_TO_DEVICE);

    if ((ventry->iov_base != NULL) && (len == mapped_len)) {
        ventry->iov_len = mapped_len;
        pkt->raw_frags++;
        return true;
    } else {
        return false;
    }
}

bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt)
{
    return pkt->raw_frags > 0;
}

eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->packet_type;
}

size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->hdr_len + pkt->payload_len;
}

void net_tx_pkt_dump(struct NetTxPkt *pkt)
{
#ifdef NET_TX_PKT_DEBUG
    assert(pkt);

    printf("TX PKT: hdr_len: %d, pkt_type: 0x%X, l2hdr_len: %zu, "
        "l3hdr_len: %zu, payload_len: %u\n", pkt->hdr_len, pkt->packet_type,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len, pkt->payload_len);
#endif
}

void net_tx_pkt_reset(struct NetTxPkt *pkt)
{
    int i;

    /* no assert, as reset can be called before tx_pkt_init */
    if (!pkt) {
        return;
    }

    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));

    assert(pkt->vec);

    pkt->payload_len = 0;
    pkt->payload_frags = 0;

    assert(pkt->raw);
    for (i = 0; i < pkt->raw_frags; i++) {
        assert(pkt->raw[i].iov_base);
        pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base, pkt->raw[i].iov_len,
                      DMA_DIRECTION_TO_DEVICE, 0);
    }
    pkt->raw_frags = 0;

    pkt->hdr_len = 0;
    pkt->l4proto = 0;
}
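/*
 * Software checksum fallback for backends without virtio-net header
 * support. Note that only an IPv4 pseudo header is built here, so this
 * path assumes an IPv4 packet; an IPv6 packet would need
 * eth_calc_ip6_pseudo_hdr_csum() instead, which this version does not do.
 */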
static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt)
{
    struct iovec *iov = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    uint32_t csum_cntr;
    uint16_t csum = 0;
    uint32_t cso;
    /* number of iovec entries, excluding the vhdr entry */
    uint32_t iov_len = pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1;
    uint16_t csl;
    struct ip_header *iphdr;
    size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;

    /* Zero the checksum field */
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);

    /* Calculate L4 TCP/UDP checksum */
    csl = pkt->payload_len;

    /* add pseudo header to csum */
    iphdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
    csum_cntr = eth_calc_ip4_pseudo_hdr_csum(iphdr, csl, &cso);

    /* data checksum */
    csum_cntr +=
        net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl, cso);

    /* Put the checksum obtained into the packet */
    csum = cpu_to_be16(net_checksum_finish(csum_cntr));
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
}

enum {
    NET_TX_PKT_FRAGMENT_L2_HDR_POS = 0,
    NET_TX_PKT_FRAGMENT_L3_HDR_POS,
    NET_TX_PKT_FRAGMENT_HEADER_NUM
};

#define NET_MAX_FRAG_SG_LIST (64)

static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt,
    int *src_idx, size_t *src_offset, struct iovec *dst, int *dst_idx)
{
    size_t fetched = 0;
    struct iovec *src = pkt->vec;

    *dst_idx = NET_TX_PKT_FRAGMENT_HEADER_NUM;

    while (fetched < IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size)) {

        /* no more room in the fragment iov */
        if (*dst_idx == NET_MAX_FRAG_SG_LIST) {
            break;
        }

        /* no more data in the source iovec */
        if (*src_idx == (pkt->payload_frags + NET_TX_PKT_PL_START_FRAG)) {
            break;
        }

        dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset;
        dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset,
            IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size) - fetched);

        *src_offset += dst[*dst_idx].iov_len;
        fetched += dst[*dst_idx].iov_len;

        if (*src_offset == src[*src_idx].iov_len) {
            *src_offset = 0;
            (*src_idx)++;
        }

        (*dst_idx)++;
    }

    return fetched;
}

static inline void net_tx_pkt_sendv(struct NetTxPkt *pkt,
    NetClientState *nc, const struct iovec *iov, int iov_cnt)
{
    if (pkt->is_loopback) {
        nc->info->receive_iov(nc, iov, iov_cnt);
    } else {
        qemu_sendv_packet(nc, iov, iov_cnt);
    }
}

static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
    NetClientState *nc)
{
    struct iovec fragment[NET_MAX_FRAG_SG_LIST];
    size_t fragment_len = 0;
    bool more_frags = false;

    /* some pointers for shorter code */
    void *l2_iov_base, *l3_iov_base;
    size_t l2_iov_len, l3_iov_len;
    int src_idx = NET_TX_PKT_PL_START_FRAG, dst_idx;
    size_t src_offset = 0;
    size_t fragment_offset = 0;

    l2_iov_base = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base;
    l2_iov_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len;
    l3_iov_base = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
    l3_iov_len = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;

    /* Copy headers */
    fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_base = l2_iov_base;
    fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_len = l2_iov_len;
    fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_base = l3_iov_base;
    fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_len = l3_iov_len;

    /* Put as much data as possible and send */
    do {
        fragment_len = net_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset,
            fragment, &dst_idx);

        more_frags = (fragment_offset + fragment_len < pkt->payload_len);

        eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base,
            l3_iov_len, fragment_len, fragment_offset, more_frags);

        eth_fix_ip4_checksum(l3_iov_base, l3_iov_len);

        net_tx_pkt_sendv(pkt, nc, fragment, dst_idx);

        fragment_offset += fragment_len;

    } while (more_frags);

    return true;
}
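/*
 * Transmit the assembled packet. Three cases are handled below: the backend
 * accepts virtio-net headers (send as-is and let it deal with GSO), no GSO
 * is needed (send as-is), or GSO is needed but unsupported (fall back to
 * software IPv4 fragmentation).
 */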
bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
{
    assert(pkt);

    if (!pkt->has_virt_hdr &&
        pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
        net_tx_pkt_do_sw_csum(pkt);
    }

    /*
     * Since the underlying infrastructure does not support IP datagrams
     * longer than 64K, drop such packets without even trying to send them.
     */
    if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) {
        if (pkt->payload_len >
            ETH_MAX_IP_DGRAM_LEN -
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) {
            return false;
        }
    }

    if (pkt->has_virt_hdr ||
        pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
        net_tx_pkt_sendv(pkt, nc, pkt->vec,
            pkt->payload_frags + NET_TX_PKT_PL_START_FRAG);
        return true;
    }

    return net_tx_pkt_do_sw_fragmentation(pkt, nc);
}

bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc)
{
    bool res;

    pkt->is_loopback = true;
    res = net_tx_pkt_send(pkt, nc);
    pkt->is_loopback = false;

    return res;
}