// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that all
 * CPUs that are in the middle of receiving packets will see the new
 * offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

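/* Usage sketch (illustrative, not part of this file): a protocol typically
 * registers its offload callbacks once at init time, roughly the way
 * net/ipv4/af_inet.c registers the IPv4 offload.  The ETH_P_FOO type and
 * foo_gro_*() callbacks below are hypothetical placeholders:
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.callbacks = {
 *			.gro_receive	= foo_gro_receive,
 *			.gro_complete	= foo_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&foo_offload);		(module init; does not sleep)
 *	...
 *	dev_remove_offload(&foo_offload);	(module exit; may sleep)
 */
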
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
			READ_ONCE(p->dev->gro_max_size) :
			READ_ONCE(p->dev->gro_ipv4_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

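/* Hand a (possibly aggregated) skb to the stack: single-segment skbs skip the
 * completion callback, otherwise the matching protocol's ->gro_complete()
 * (e.g. inet_gro_complete()/ipv6_gro_complete()) fixes up the merged headers
 * before the skb is queued via gro_normal_one().
 */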
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

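/* Walk one hash bucket and mark which held packets could belong to the same
 * flow as @skb: the rx hash must match, and the device, vlan tag, metadata
 * and MAC header must compare equal.  The rarely-set 'slow_gro' bit gates the
 * more expensive socket/dst/conntrack/tc-ext comparisons.
 */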
static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* in most common scenarios 'slow_gro' is 0
		 * otherwise we are already on some slower paths
		 * either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			struct tc_skb_ext *skb_ext;
			struct tc_skb_ext *p_ext;
#endif

			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			skb_ext = skb_ext_find(skb, TC_SKB_EXT);
			p_ext = skb_ext_find(p, TC_SKB_EXT);

			diffs |= (!!p_ext) ^ (!!skb_ext);
			if (!diffs && unlikely(skb_ext))
				diffs |= p_ext->chain ^ skb_ext->chain;
#endif
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

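/* Core GRO entry point for one received skb: pick the hash bucket, mark
 * same-flow candidates, and call the protocol's ->gro_receive().  The result
 * decides whether the skb was merged into an existing chain, is held as a
 * new chain head, or must bypass GRO (GRO_NORMAL) and go up immediately.
 */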
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);

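/* Usage sketch (illustrative, not part of this file): a NAPI driver normally
 * feeds received buffers to GRO from its poll callback.  The foo_*() helpers
 * below are hypothetical placeholders:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = foo_rx_next_skb(napi);
 *
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */
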
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);

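/* Usage sketch (illustrative, not part of this file): drivers that receive
 * directly into pages use the frags interface instead of building a linear
 * skb themselves.  page/offset/len/truesize come from the hypothetical rx
 * descriptor:
 *
 *	skb = napi_get_frags(napi);
 *	if (!skb)
 *		goto drop;
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
 *			truesize);
 *	napi_gro_frags(napi);
 */
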
/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
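
/* Usage sketch (illustrative, not part of this file): protocol gro_receive
 * handlers normally do not call __skb_gro_checksum_complete() directly but
 * go through the validation helpers in <net/gro.h>, roughly:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 *
 * When validation fails, the handler typically flushes the packet so it is
 * passed up the stack without being merged.
 */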