xref: /openbmc/linux/net/core/gro.c (revision 2f4e3926)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all CPUs
 *	that are in the middle of receiving packets will see the new offload
 *	handlers (until the next received packet).
 *
 *	(See the illustrative registration example following this function.)
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
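
/*
 * Illustrative example (not part of this file): a protocol typically
 * registers its offload callbacks once at init time.  IPv4 does roughly
 * the following in net/ipv4/af_inet.c; the callback names below are the
 * in-tree ones and may differ between kernel versions:
 *
 *	static struct packet_offload ip_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = inet_gso_segment,
 *			.gro_receive = inet_gro_receive,
 *			.gro_complete = inet_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&ip_packet_offload);
 */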

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers and must not be
 *	freed until after all the CPUs have gone through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
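
/*
 * Illustrative sketch (hypothetical names): the unregistration side mirrors
 * the registration example above.  Because dev_remove_offload() calls
 * synchronize_net(), the caller may free or reuse the structure as soon as
 * it returns:
 *
 *	static void __exit foo_offload_exit(void)
 *	{
 *		dev_remove_offload(&foo_packet_offload);
 *	}
 *
 * After this point foo_packet_offload can safely be freed or reused.
 */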


int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	if (unlikely(p->len + len >= netif_get_gro_max_size(p->dev, p) ||
		     NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

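/*
 * Illustrative sketch (simplified and hypothetical): skb_gro_receive() is
 * called from a protocol's gro_receive handler once a packet 'p' belonging
 * to the same flow has been found on the GRO list:
 *
 *	list_for_each_entry(p, head, list) {
 *		if (!NAPI_GRO_CB(p)->same_flow)
 *			continue;
 *		if (skb_gro_receive(p, skb))
 *			pp = p;
 *		break;
 *	}
 *
 * A zero return means 'skb' was merged into 'p'; a negative return such as
 * -E2BIG or -ETOOMANYREFS above means merging is not possible, so the caller
 * typically reports 'p' (via 'pp') to be flushed and the packets are then
 * delivered separately.
 */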

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}
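
/*
 * Illustrative sketch (hypothetical protocol, simplified): a gro_complete
 * callback receives the merged super-skb plus the offset of its own header,
 * patches up any header fields that became stale during merging, and chains
 * to the next layer:
 *
 *	static int foo_gro_complete(struct sk_buff *skb, int nhoff)
 *	{
 *		struct foohdr *fh = (struct foohdr *)(skb->data + nhoff);
 *
 *		fh->tot_len = htons(skb->len - nhoff);
 *		return bar_gro_complete(skb, nhoff + sizeof(*fh));
 *	}
 */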

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at its head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);
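
/*
 * Worked example of the bucket walk above (illustrative): with
 * gro_bitmask == 0b100101 (buckets 0, 2 and 5 hold packets), successive
 * ffs() calls return 1, 2 and 3, so 'base' takes the values 0, 2 and 5 and
 * exactly those buckets are flushed, oldest skb first within each chain.
 */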

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0;
		 * otherwise we are already on some slower paths, so
		 * either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->network_offset = 0;
	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
					sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non-DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
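
/*
 * Illustrative sketch: encapsulation GRO handlers (e.g. the Ethernet, VXLAN
 * or GENEVE offloads) use these lookups to chain to the inner protocol.  A
 * simplified, hypothetical gro_receive callback might do:
 *
 *	struct packet_offload *ptype;
 *	struct sk_buff *pp = NULL;
 *
 *	ptype = gro_find_receive_by_type(type);
 *	if (!ptype) {
 *		flush = 1;
 *		goto out;
 *	}
 *	skb_gro_pull(skb, hlen);
 *	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 *
 * Callers must already hold rcu_read_lock(), since the offload list is
 * walked under RCU.
 */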

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
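
/*
 * Illustrative sketch (hypothetical driver, simplified): napi_gro_receive()
 * is the usual entry point from a driver's NAPI poll routine, after the skb
 * has been built and eth_type_trans() has set skb->protocol.  The helper
 * foo_build_rx_skb() and the priv/netdev variables are made up for the
 * example:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = foo_build_rx_skb(priv))) {
 *			skb->protocol = eth_type_trans(skb, netdev);
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */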

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset = 0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so we
 * copy the Ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
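
/*
 * Illustrative sketch (hypothetical driver, simplified): the napi_get_frags()
 * / napi_gro_frags() pair lets a page-based driver hand packets to GRO
 * without building a linear header or calling eth_type_trans() itself.  The
 * page/offset/len/truesize values come from the driver's RX descriptor:
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (unlikely(!skb))
 *		return;
 *	skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 *	napi_gro_frags(napi);
 *
 * napi_frags_skb() above then copies the Ethernet header out of frag0 and
 * sets skb->protocol before the packet enters dev_gro_receive().
 */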

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
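
/*
 * Illustrative sketch: protocol gro_receive handlers do not normally call
 * __skb_gro_checksum_complete() directly; they use the
 * skb_gro_checksum_validate() helpers from <net/gro.h>, which fall back to
 * this function when no hardware checksum result can be reused.  TCP over
 * IPv4 does roughly the following before attempting to merge:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo)) {
 *		NAPI_GRO_CB(skb)->flush = 1;
 *		return NULL;
 *	}
 */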