/* xref: /openbmc/linux/net/core/gro.c (revision 62a9bbf2) */
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
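
/* Example (editor's illustrative sketch, not part of this file): registering
 * a packet_offload, modelled on the IPv4 registration in net/ipv4/af_inet.c.
 * The callbacks shown are the real IPv4 ones; a new protocol would supply
 * its own.
 *
 *	static struct packet_offload ip_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = inet_gso_segment,
 *			.gro_receive = inet_gro_receive,
 *			.gro_complete = inet_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&ip_packet_offload);
 */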

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
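
/* Example (illustrative sketch): a module implementing a protocol offload
 * would typically unregister it in its exit path; my_offload is a placeholder
 * for a packet_offload registered earlier with dev_add_offload().
 *
 *	static void __exit my_proto_exit(void)
 *	{
 *		dev_remove_offload(&my_offload);
 *	}
 */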

/**
 *	skb_eth_gso_segment - segmentation handler for Ethernet protocols.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@type: Ethernet Protocol ID
 */
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
				    netdev_features_t features, __be16 type)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_eth_gso_segment);
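
/* Example (illustrative sketch): callers that know the inner protocol type
 * explicitly can segment by it directly, roughly as the ESP GSO path in
 * net/ipv4/esp4_offload.c does for tunnel mode; inner_is_ipv6 is a
 * placeholder condition.
 *
 *	__be16 type = inner_is_ipv6 ? htons(ETH_P_IPV6) : htons(ETH_P_IP);
 *
 *	return skb_eth_gso_segment(skb, features, type);
 */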

/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
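
/* Example (illustrative sketch): unlike skb_eth_gso_segment(), this variant
 * derives the inner type from the skb itself (via skb_network_protocol(),
 * VLAN tags included), roughly as the MPLS GSO path in net/mpls/mpls_gso.c
 * does after pulling the MPLS headers:
 *
 *	mpls_features = skb->dev->mpls_features & features;
 *	segs = skb_mac_gso_segment(skb, mpls_features);
 */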

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
	gro_max_size = READ_ONCE(p->dev->gro_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (p->protocol != htons(ETH_P_IPV6) ||
		    skb_headroom(p) < sizeof(struct hop_jumbo_hdr) ||
		    ipv6_hdr(p)->nexthdr != IPPROTO_TCP ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments' truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		__skb_frag_set_page(frag, page);
		skb_frag_off_set(frag, first_offset);
		skb_frag_size_set(frag, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
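
/* Example (illustrative sketch): protocol gro_receive callbacks, e.g. TCP's
 * in net/ipv4/tcp_offload.c, call skb_gro_receive() once they have matched
 * skb against a held packet p; a nonzero return means the two could not be
 * merged and the held packet should be flushed, roughly:
 *
 *	if (flush || skb_gro_receive(p, skb)) {
 *		mss = 1;
 *		goto out_check_final;
 *	}
 */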

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at the head of the list.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);
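
/* Example (illustrative sketch): the core calls this from napi_complete_done()
 * as roughly napi_gro_flush(n, !!timeout), i.e. flush_old is true when a
 * gro_flush_timeout is armed so packets merged during the current jiffy are
 * kept a little longer. An unconditional flush would be:
 *
 *	napi_gro_flush(napi, false);	// flush everything, regardless of age
 */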

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0; otherwise
		 * we are already on some slower path. Either skip all the
		 * infrequent tests altogether or avoid trying too hard to
		 * skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			struct tc_skb_ext *skb_ext;
			struct tc_skb_ext *p_ext;
#endif

			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			skb_ext = skb_ext_find(skb, TC_SKB_EXT);
			p_ext = skb_ext_find(p, TC_SKB_EXT);

			diffs |= (!!p_ext) ^ (!!skb_ext);
			if (!diffs && unlikely(skb_ext))
				diffs |= p_ext->chain ^ skb_ext->chain;
#endif
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
					sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non-DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
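
/* Example (illustrative sketch): encapsulation GRO handlers use these lookups
 * to hand the inner packet to the right protocol, roughly as the Geneve
 * driver does in its own gro_receive callback:
 *
 *	ptype = gro_find_receive_by_type(type);
 *	if (!ptype)
 *		goto out;
 *
 *	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 */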

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__kfree_skb_defer(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
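
/* Example (illustrative sketch): the canonical caller is a driver's NAPI poll
 * loop, which builds an skb per received frame and feeds it to GRO. All names
 * other than the two core calls are placeholders.
 *
 *	while (work_done < budget && (skb = fetch_rx_frame(ring))) {
 *		skb->protocol = eth_type_trans(skb, netdev);
 *		napi_gro_receive(napi, skb);
 *		work_done++;
 *	}
 */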

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so we
 * copy the Ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
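
/* Example (illustrative sketch): drivers that receive directly into
 * pre-posted pages (e.g. mlx4) pair napi_get_frags() with napi_gro_frags().
 * Names other than those two calls are placeholders.
 *
 *	skb = napi_get_frags(napi);
 *	if (!skb)
 *		goto drop;
 *	// attach the received page(s) to skb_shinfo(skb)->frags[],
 *	// update skb->len / data_len / truesize, then:
 *	napi_gro_frags(napi);
 */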

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
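
/* Example (illustrative sketch): protocol gro_receive handlers normally reach
 * this through the skb_gro_checksum_validate() helpers in include/net/gro.h,
 * roughly as TCP does before attempting any merge:
 *
 *	if (!NAPI_GRO_CB(skb)->flush &&
 *	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo)) {
 *		NAPI_GRO_CB(skb)->flush = 1;
 *		return NULL;
 *	}
 */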
805