Lines Matching +full:foo +full:- +full:queue
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
16 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
32 /* This indicates where we are processing relative to skb->data. */
35 /* This is non-zero if the packet cannot be merged with the new skb. */
44 /* Used in ipv6_gro_receive() and foo-over-udp */
56 /* This is non-zero if the packet may be of the same flow. */
71 /* Used in foo-over-udp, set in udp[46]_gro_receive */
100 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
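For reference, GRO keeps its per-packet state in skb->cb behind this cast. A minimal sketch (assuming <net/gro.h> is included) of how a protocol gro_receive handler might consult and update that state while walking the held packets; flow_mismatch() is a hypothetical predicate used only for illustration.

/* Sketch: walk the per-flow hold list and clear same_flow on packets that
 * cannot be merged with the newly arrived skb.  flow_mismatch() is illustrative.
 */
static struct sk_buff *example_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;
		if (flow_mismatch(p, skb))
			NAPI_GRO_CB(p)->same_flow = 0;
	}
	return NULL;
}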
105 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; in gro_recursion_inc_test()
114 NAPI_GRO_CB(skb)->flush |= 1;
129 NAPI_GRO_CB(skb)->flush |= 1;
138 return NAPI_GRO_CB(skb)->data_offset;
143 return skb->len - NAPI_GRO_CB(skb)->data_offset; in skb_gro_len()
148 NAPI_GRO_CB(skb)->data_offset += len; in skb_gro_pull()
154 return NAPI_GRO_CB(skb)->frag0 + offset; in skb_gro_header_fast()
159 return NAPI_GRO_CB(skb)->frag0_len < hlen; in skb_gro_header_hard()
164 NAPI_GRO_CB(skb)->frag0 = NULL; in skb_gro_frag0_invalidate()
165 NAPI_GRO_CB(skb)->frag0_len = 0; in skb_gro_frag0_invalidate()
175 return skb->data + offset; in skb_gro_header_slow()
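skb_gro_header_fast(), skb_gro_header_hard() and skb_gro_header_slow() are normally used as a fast/slow pair: read the header from frag0 when it fits there, otherwise fall back to the linear data. A sketch of that pattern for a TCP header (the wrapping function name is illustrative):

/* Sketch: the common fast/slow header access pattern in a gro_receive handler. */
static struct tcphdr *example_gro_tcphdr(struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct tcphdr);
	struct tcphdr *th;

	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			return NULL;	/* header unavailable; caller should flush */
	}
	return th;
}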
191 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + in skb_gro_network_header()
199 return csum_tcpudp_nofold(iph->saddr, iph->daddr, in inet_gro_compute_pseudo()
206 if (NAPI_GRO_CB(skb)->csum_valid) in skb_gro_postpull_rcsum()
207 NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len, in skb_gro_postpull_rcsum()
208 wsum_negate(NAPI_GRO_CB(skb)->csum))); in skb_gro_postpull_rcsum()
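skb_gro_postpull_rcsum() keeps the running GRO checksum consistent after a header has been consumed, so it is typically paired with skb_gro_pull(). A small sketch of that pairing (hdr and hdrlen are illustrative):

/* Sketch: advance past a parsed header and fold it out of the running csum. */
static void example_gro_consume_header(struct sk_buff *skb, const void *hdr,
				       unsigned int hdrlen)
{
	skb_gro_pull(skb, hdrlen);
	skb_gro_postpull_rcsum(skb, hdr, hdrlen);
}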
220 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb)); in skb_at_gro_remcsum_start()
227 return ((skb->ip_summed != CHECKSUM_PARTIAL || in __skb_gro_checksum_validate_needed()
231 NAPI_GRO_CB(skb)->csum_cnt == 0 && in __skb_gro_checksum_validate_needed()
238 if (NAPI_GRO_CB(skb)->csum_valid && in __skb_gro_checksum_validate_complete()
239 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) in __skb_gro_checksum_validate_complete()
242 NAPI_GRO_CB(skb)->csum = psum; in __skb_gro_checksum_validate_complete()
249 if (NAPI_GRO_CB(skb)->csum_cnt > 0) { in skb_gro_incr_csum_unnecessary()
251 NAPI_GRO_CB(skb)->csum_cnt--; in skb_gro_incr_csum_unnecessary()
285 return (NAPI_GRO_CB(skb)->csum_cnt == 0 && in __skb_gro_checksum_convert_check()
286 !NAPI_GRO_CB(skb)->csum_valid); in __skb_gro_checksum_convert_check()
292 NAPI_GRO_CB(skb)->csum = ~pseudo; in __skb_gro_checksum_convert()
293 NAPI_GRO_CB(skb)->csum_valid = 1; in __skb_gro_checksum_convert()
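These _needed/_complete/_convert helpers are combined by validation wrappers elsewhere in the same header; the sketch below is loosely modelled on the IPv4 TCP case and assumes the skb_gro_checksum_validate() wrapper and tcp_gro_receive(), neither of which appears in the lines listed here.

/* Sketch: validate the transport checksum against the pseudo-header before
 * handing the skb to the generic TCP coalescing code.
 */
static struct sk_buff *example_tcp4_gro_receive(struct list_head *head,
						struct sk_buff *skb)
{
	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;	/* bad checksum: flush, do not merge */
		return NULL;
	}
	return tcp_gro_receive(head, skb);
}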
310 grc->offset = 0; in skb_gro_remcsum_init()
311 grc->delta = 0; in skb_gro_remcsum_init()
323 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); in skb_gro_remcsum_process()
326 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start; in skb_gro_remcsum_process()
334 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum, in skb_gro_remcsum_process()
337 /* Adjust skb->csum since we changed the packet */ in skb_gro_remcsum_process()
338 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); in skb_gro_remcsum_process()
340 grc->offset = off + hdrlen + offset; in skb_gro_remcsum_process()
341 grc->delta = delta; in skb_gro_remcsum_process()
350 size_t plen = grc->offset + sizeof(u16); in skb_gro_remcsum_cleanup()
352 if (!grc->delta) in skb_gro_remcsum_cleanup()
355 ptr = skb_gro_header(skb, plen, grc->offset); in skb_gro_remcsum_cleanup()
359 remcsum_unadjust((__sum16 *)ptr, grc->delta); in skb_gro_remcsum_cleanup()
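skb_gro_remcsum_init(), skb_gro_remcsum_process() and skb_gro_remcsum_cleanup() are used together by tunnel GRO paths such as foo-over-udp. A rough sketch of the sequence; hdr, hdrlen, start, offset and nopartial describe the encapsulation header and are illustrative parameters.

/* Sketch: remote checksum offload during tunnel GRO.  The error path of the
 * gro_receive handler later undoes the adjustment with skb_gro_remcsum_cleanup()
 * (see skb_gro_flush_final_remcsum() below).
 */
static void *example_gro_remcsum(struct sk_buff *skb, void *hdr, size_t hdrlen,
				 size_t start, size_t offset,
				 struct gro_remcsum *grc, bool nopartial)
{
	skb_gro_remcsum_init(grc);

	return skb_gro_remcsum_process(skb, hdr, skb_gro_offset(skb), hdrlen,
				       start, offset, grc, nopartial);
}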
365 if (PTR_ERR(pp) != -EINPROGRESS) in skb_gro_flush_final()
366 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final()
373 if (PTR_ERR(pp) != -EINPROGRESS) { in skb_gro_flush_final_remcsum()
374 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final_remcsum()
376 skb->remcsum_offload = 0; in skb_gro_flush_final_remcsum()
382 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final()
389 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final_remcsum()
391 skb->remcsum_offload = 0; in skb_gro_flush_final_remcsum()
413 NAPI_GRO_CB(skb)->flush |= 1, NULL : \
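The indirect-call macro above guards nested gro_receive invocations with gro_recursion_inc_test(); written out as plain code, the pattern looks roughly like this (next_gro_receive() stands in for the next layer's handler):

/* Sketch: bail out and request a flush once the GRO recursion limit is hit. */
static struct sk_buff *example_call_next_layer(struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}
	return next_gro_receive(head, skb);
}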
437 return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr, in ip6_gro_compute_pseudo()
446 if (!napi->rx_count) in gro_normal_list()
448 netif_receive_skb_list_internal(&napi->rx_list); in gro_normal_list()
449 INIT_LIST_HEAD(&napi->rx_list); in gro_normal_list()
450 napi->rx_count = 0; in gro_normal_list()
453 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, pass the whole batch up to the stack. */
458 list_add_tail(&skb->list, &napi->rx_list); in gro_normal_one()
459 napi->rx_count += segs; in gro_normal_one()
460 if (napi->rx_count >= READ_ONCE(gro_normal_batch)) in gro_normal_one()
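gro_normal_one() batches GRO_NORMAL skbs on napi->rx_list and flushes once gro_normal_batch is reached; the other flush point is the end of the driver's poll. A driver-side sketch (example_rx_next() is an illustrative ring helper):

/* Sketch: NAPI poll feeding GRO; pending rx_list entries are also flushed when
 * the poll completes.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct sk_buff *skb;

	while (work_done < budget && (skb = example_rx_next(napi))) {
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}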
467 * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
472 *iif = inet_iif(skb) ?: skb->dev->ifindex; in inet_get_iif_sdif()
476 if (netif_is_l3_slave(skb->dev)) { in inet_get_iif_sdif()
477 struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev); in inet_get_iif_sdif()
480 *iif = master ? master->ifindex : 0; in inet_get_iif_sdif()
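A sketch of how a GRO-time lookup can use inet_get_iif_sdif() while skb_dst() is not yet valid; example_lookup_sock() is a placeholder for the protocol's actual socket lookup.

/* Sketch: derive iif/sdif for an early (pre-dst) socket lookup during GRO. */
static struct sock *example_gro_lookup(const struct sk_buff *skb,
				       __be16 sport, __be16 dport)
{
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet_get_iif_sdif(skb, &iif, &sdif);
	return example_lookup_sock(net, skb, sport, dport, iif, sdif);
}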
488 * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
493 /* using skb->dev->ifindex because skb_dst(skb) is not initialized */ in inet6_get_iif_sdif()
494 *iif = skb->dev->ifindex; in inet6_get_iif_sdif()
498 if (netif_is_l3_slave(skb->dev)) { in inet6_get_iif_sdif()
499 struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev); in inet6_get_iif_sdif()
502 *iif = master ? master->ifindex : 0; in inet6_get_iif_sdif()