ip6_output.c: comparison of two versions
    ip6_output.c @ b4b12b0d2f02613101a7a667ef7b7cc8d388e597
    ip6_output.c @ 956fe2190820df3a6ee530204e059da508159319
1/*
2 * IPv6 output functions
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on linux/net/ipv4/ip_output.c

--- 114 unchanged lines hidden ---

123 }
124 rcu_read_unlock_bh();
125
126 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
127 kfree_skb(skb);
128 return -EINVAL;
129}
130
131static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
132{
133 int ret;
134
135 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
136 if (ret) {
137 kfree_skb(skb);
138 return ret;
139 }
140
141#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
142 /* Policy lookup after SNAT yielded a new policy */
143 if (skb_dst(skb)->xfrm) {
144 IPCB(skb)->flags |= IPSKB_REROUTED;
145 return dst_output(net, sk, skb);
146 }
147#endif
148
149 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
150 dst_allfrag(skb_dst(skb)) ||
151 (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
152 return ip6_fragment(net, sk, skb, ip6_finish_output2);
153 else
154 return ip6_finish_output2(net, sk, skb);
155}
156
131static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
132{
133#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
134 /* Policy lookup after SNAT yielded a new policy */
135 if (skb_dst(skb)->xfrm) {
136 IPCB(skb)->flags |= IPSKB_REROUTED;
137 return dst_output(net, sk, skb);
138 }
139#endif
140
141 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
142 dst_allfrag(skb_dst(skb)) ||
143 (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
144 return ip6_fragment(net, sk, skb, ip6_finish_output2);
145 else
146 return ip6_finish_output2(net, sk, skb);
147}
148
149static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
150{
151 int ret;
152
153 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
154 switch (ret) {
155 case NET_XMIT_SUCCESS:
156 return __ip6_finish_output(net, sk, skb);
157 case NET_XMIT_CN:
158 return __ip6_finish_output(net, sk, skb) ? : ret;
159 default:
160 kfree_skb(skb);
161 return ret;
162 }
163}
164
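In the __ip6_finish_output()/ip6_finish_output() split shown directly above, the verdict of the BPF cgroup egress program is interpreted with the kernel's NET_XMIT_* semantics rather than treated as drop-on-any-nonzero as in the other version of ip6_finish_output(): NET_XMIT_SUCCESS (0) transmits normally, NET_XMIT_CN ("congestion notification") still transmits but propagates the CN code to the caller, and any other value frees the skb. The GNU "x ? : y" shorthand in the NET_XMIT_CN case returns __ip6_finish_output()'s own error when it is nonzero and falls back to ret otherwise. A minimal, userspace-compilable sketch of that control flow; run_egress_hook() and xmit_one() are hypothetical stand-ins, not kernel functions:

#include <stdio.h>

#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_DROP    0x01
#define NET_XMIT_CN      0x02	/* congestion notification: packet is still sent */

static int run_egress_hook(void) { return NET_XMIT_CN; }	/* stand-in for the BPF egress program */
static int xmit_one(void)        { return 0; }			/* stand-in for __ip6_finish_output() */

static int finish_output(void)
{
	int ret = run_egress_hook();

	switch (ret) {
	case NET_XMIT_SUCCESS:
		return xmit_one();
	case NET_XMIT_CN:
		/* GNU "?:": a real transmit error wins, otherwise report CN */
		return xmit_one() ? : ret;
	default:
		/* the kernel would kfree_skb() here */
		return ret;
	}
}

int main(void)
{
	printf("finish_output() = %d\n", finish_output());	/* prints 2 (NET_XMIT_CN) */
	return 0;
}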
157int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
158{
159 struct net_device *dev = skb_dst(skb)->dev;
160 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
161
162 skb->protocol = htons(ETH_P_IPV6);
163 skb->dev = dev;
164

--- 422 unchanged lines hidden (view full) ---

587#ifdef CONFIG_NET_SCHED
588 to->tc_index = from->tc_index;
589#endif
590 nf_copy(to, from);
591 skb_ext_copy(to, from);
592 skb_copy_secmark(to, from);
593}
594
165int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
166{
167 struct net_device *dev = skb_dst(skb)->dev;
168 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
169
170 skb->protocol = htons(ETH_P_IPV6);
171 skb->dev = dev;
172

--- 422 unchanged lines hidden (view full) ---

595#ifdef CONFIG_NET_SCHED
596 to->tc_index = from->tc_index;
597#endif
598 nf_copy(to, from);
599 skb_ext_copy(to, from);
600 skb_copy_secmark(to, from);
601}
602
595int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr,
596 u8 nexthdr, __be32 frag_id,
597 struct ip6_fraglist_iter *iter)
598{
599 unsigned int first_len;
600 struct frag_hdr *fh;
601
602 /* BUILD HEADER */
603 *prevhdr = NEXTHDR_FRAGMENT;
604 iter->tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
605 if (!iter->tmp_hdr)
606 return -ENOMEM;
607
608 iter->frag_list = skb_shinfo(skb)->frag_list;
609 iter->frag = iter->frag_list;
610 skb_frag_list_init(skb);
611
612 iter->offset = 0;
613 iter->hlen = hlen;
614 iter->frag_id = frag_id;
615 iter->nexthdr = nexthdr;
616
617 __skb_pull(skb, hlen);
618 fh = __skb_push(skb, sizeof(struct frag_hdr));
619 __skb_push(skb, hlen);
620 skb_reset_network_header(skb);
621 memcpy(skb_network_header(skb), iter->tmp_hdr, hlen);
622
623 fh->nexthdr = nexthdr;
624 fh->reserved = 0;
625 fh->frag_off = htons(IP6_MF);
626 fh->identification = frag_id;
627
628 first_len = skb_pagelen(skb);
629 skb->data_len = first_len - skb_headlen(skb);
630 skb->len = first_len;
631 ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr));
632
633 return 0;
634}
635EXPORT_SYMBOL(ip6_fraglist_init);
636
637void ip6_fraglist_prepare(struct sk_buff *skb,
638 struct ip6_fraglist_iter *iter)
639{
640 struct sk_buff *frag = iter->frag;
641 unsigned int hlen = iter->hlen;
642 struct frag_hdr *fh;
643
644 frag->ip_summed = CHECKSUM_NONE;
645 skb_reset_transport_header(frag);
646 fh = __skb_push(frag, sizeof(struct frag_hdr));
647 __skb_push(frag, hlen);
648 skb_reset_network_header(frag);
649 memcpy(skb_network_header(frag), iter->tmp_hdr, hlen);
650 iter->offset += skb->len - hlen - sizeof(struct frag_hdr);
651 fh->nexthdr = iter->nexthdr;
652 fh->reserved = 0;
653 fh->frag_off = htons(iter->offset);
654 if (frag->next)
655 fh->frag_off |= htons(IP6_MF);
656 fh->identification = iter->frag_id;
657 ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
658 ip6_copy_metadata(frag, skb);
659}
660EXPORT_SYMBOL(ip6_fraglist_prepare);
661
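ip6_fraglist_init() and ip6_fraglist_prepare() above factor the frag_list ("fast path") half of IPv6 fragmentation into exported helpers: init converts the head skb into the first fragment in place and detaches its frag_list, and prepare rewrites each queued fragment with a copy of the saved network headers plus its own fragment header. The calling pattern, condensed from the ip6_fragment() fast path later in this file (statistics updates and the surrounding error labels are trimmed), is roughly:

	err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id, &iter);
	if (err < 0)
		goto fail;

	for (;;) {
		/* Prepare the next fragment's headers before this one is sent. */
		if (iter.frag)
			ip6_fraglist_prepare(skb, &iter);

		err = output(net, sk, skb);
		if (err || !iter.frag)
			break;

		skb = ip6_fraglist_next(&iter);	/* advance to the next fragment */
	}

	kfree(iter.tmp_hdr);
	if (err)
		kfree_skb_list(iter.frag_list);	/* free anything not yet sent */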
662void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu,
663 unsigned short needed_tailroom, int hdr_room, u8 *prevhdr,
664 u8 nexthdr, __be32 frag_id, struct ip6_frag_state *state)
665{
666 state->prevhdr = prevhdr;
667 state->nexthdr = nexthdr;
668 state->frag_id = frag_id;
669
670 state->hlen = hlen;
671 state->mtu = mtu;
672
673 state->left = skb->len - hlen; /* Space per frame */
674 state->ptr = hlen; /* Where to start from */
675
676 state->hroom = hdr_room;
677 state->troom = needed_tailroom;
678
679 state->offset = 0;
680}
681EXPORT_SYMBOL(ip6_frag_init);
682
683struct sk_buff *ip6_frag_next(struct sk_buff *skb, struct ip6_frag_state *state)
684{
685 u8 *prevhdr = state->prevhdr, *fragnexthdr_offset;
686 struct sk_buff *frag;
687 struct frag_hdr *fh;
688 unsigned int len;
689
690 len = state->left;
691 /* IF: it doesn't fit, use 'mtu' - the data space left */
692 if (len > state->mtu)
693 len = state->mtu;
694 /* IF: we are not sending up to and including the packet end
695 then align the next start on an eight byte boundary */
696 if (len < state->left)
697 len &= ~7;
698
699 /* Allocate buffer */
700 frag = alloc_skb(len + state->hlen + sizeof(struct frag_hdr) +
701 state->hroom + state->troom, GFP_ATOMIC);
702 if (!frag)
703 return ERR_PTR(-ENOMEM);
704
705 /*
706 * Set up data on packet
707 */
708
709 ip6_copy_metadata(frag, skb);
710 skb_reserve(frag, state->hroom);
711 skb_put(frag, len + state->hlen + sizeof(struct frag_hdr));
712 skb_reset_network_header(frag);
713 fh = (struct frag_hdr *)(skb_network_header(frag) + state->hlen);
714 frag->transport_header = (frag->network_header + state->hlen +
715 sizeof(struct frag_hdr));
716
717 /*
718 * Charge the memory for the fragment to any owner
719 * it might possess
720 */
721 if (skb->sk)
722 skb_set_owner_w(frag, skb->sk);
723
724 /*
725 * Copy the packet header into the new buffer.
726 */
727 skb_copy_from_linear_data(skb, skb_network_header(frag), state->hlen);
728
729 fragnexthdr_offset = skb_network_header(frag);
730 fragnexthdr_offset += prevhdr - skb_network_header(skb);
731 *fragnexthdr_offset = NEXTHDR_FRAGMENT;
732
733 /*
734 * Build fragment header.
735 */
736 fh->nexthdr = state->nexthdr;
737 fh->reserved = 0;
738 fh->identification = state->frag_id;
739
740 /*
741 * Copy a block of the IP datagram.
742 */
743 BUG_ON(skb_copy_bits(skb, state->ptr, skb_transport_header(frag),
744 len));
745 state->left -= len;
746
747 fh->frag_off = htons(state->offset);
748 if (state->left > 0)
749 fh->frag_off |= htons(IP6_MF);
750 ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
751
752 state->ptr += len;
753 state->offset += len;
754
755 return frag;
756}
757EXPORT_SYMBOL(ip6_frag_next);
758
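Both ip6_fraglist_prepare() and ip6_frag_next() fill the fragment header with fh->frag_off = htons(offset), OR-ing in htons(IP6_MF) when more fragments follow, where offset is a byte offset that is always a multiple of 8. That matches the on-wire layout of the Fragment header (RFC 8200): the 16-bit field carries a 13-bit fragment offset in 8-octet units in its upper bits, two reserved bits, and the M ("more fragments") flag as the lowest bit, so a byte offset with its low three bits clear is already (units << 3). A small userspace check of the encoding; IP6_MF and IP6_OFFSET below use the same values as the kernel's definitions:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IP6_MF     0x0001	/* M flag: more fragments follow */
#define IP6_OFFSET 0xfff8	/* 13-bit offset in 8-octet units, in the upper bits */

int main(void)
{
	unsigned int byte_offset = 1448;	/* fragment data offset, a multiple of 8 */
	uint16_t frag_off = htons(byte_offset) | htons(IP6_MF);

	/* Decode the field the way a receiver would. */
	uint16_t host = ntohs(frag_off);
	printf("offset = %u bytes, more fragments = %u\n",
	       (unsigned)(host & IP6_OFFSET), (unsigned)(host & IP6_MF));	/* prints 1448 and 1 */
	return 0;
}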
603int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
604 int (*output)(struct net *, struct sock *, struct sk_buff *))
605{
606 struct sk_buff *frag;
607 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
608 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
609 inet6_sk(skb->sk) : NULL;
610 struct ipv6hdr *tmp_hdr;
611 struct frag_hdr *fh;
612 unsigned int mtu, hlen, left, len, nexthdr_offset;
613 int hroom, troom;
614 __be32 frag_id;
615 int ptr, offset = 0, err = 0;
616 u8 *prevhdr, nexthdr = 0;
617
618 err = ip6_find_1stfragopt(skb, &prevhdr);
619 if (err < 0)
620 goto fail;
621 hlen = err;
622 nexthdr = *prevhdr;
623 nexthdr_offset = prevhdr - skb_network_header(skb);

--- 30 unchanged lines hidden ---

654 if (skb->ip_summed == CHECKSUM_PARTIAL &&
655 (err = skb_checksum_help(skb)))
656 goto fail;
657
658 prevhdr = skb_network_header(skb) + nexthdr_offset;
659 hroom = LL_RESERVED_SPACE(rt->dst.dev);
660 if (skb_has_frag_list(skb)) {
661 unsigned int first_len = skb_pagelen(skb);
759int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
760 int (*output)(struct net *, struct sock *, struct sk_buff *))
761{
762 struct sk_buff *frag;
763 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
764 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
765 inet6_sk(skb->sk) : NULL;
766 struct ip6_frag_state state;
767 unsigned int mtu, hlen, nexthdr_offset;
768 int hroom, err = 0;
769 __be32 frag_id;
770 u8 *prevhdr, nexthdr = 0;
771
772 err = ip6_find_1stfragopt(skb, &prevhdr);
773 if (err < 0)
774 goto fail;
775 hlen = err;
776 nexthdr = *prevhdr;
777 nexthdr_offset = prevhdr - skb_network_header(skb);

--- 30 unchanged lines hidden ---

808 if (skb->ip_summed == CHECKSUM_PARTIAL &&
809 (err = skb_checksum_help(skb)))
810 goto fail;
811
812 prevhdr = skb_network_header(skb) + nexthdr_offset;
813 hroom = LL_RESERVED_SPACE(rt->dst.dev);
814 if (skb_has_frag_list(skb)) {
815 unsigned int first_len = skb_pagelen(skb);
816 struct ip6_fraglist_iter iter;
817 struct sk_buff *frag2;
818
819 if (first_len - hlen > mtu ||
820 ((first_len - hlen) & 7) ||
821 skb_cloned(skb) ||
822 skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
823 goto slow_path;
824

--- 11 unchanged lines hidden ---

836 BUG_ON(frag->sk);
837 if (skb->sk) {
838 frag->sk = skb->sk;
839 frag->destructor = sock_wfree;
840 }
841 skb->truesize -= frag->truesize;
842 }
843
662 struct sk_buff *frag2;
663
664 if (first_len - hlen > mtu ||
665 ((first_len - hlen) & 7) ||
666 skb_cloned(skb) ||
667 skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
668 goto slow_path;
669

--- 11 unchanged lines hidden ---

681 BUG_ON(frag->sk);
682 if (skb->sk) {
683 frag->sk = skb->sk;
684 frag->destructor = sock_wfree;
685 }
686 skb->truesize -= frag->truesize;
687 }
688
844 err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
845 &iter);
846 if (err < 0)
847 goto fail;
848
689 err = 0;
690 offset = 0;
691 /* BUILD HEADER */
692
693 *prevhdr = NEXTHDR_FRAGMENT;
694 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
695 if (!tmp_hdr) {
696 err = -ENOMEM;
697 goto fail;
698 }
699 frag = skb_shinfo(skb)->frag_list;
700 skb_frag_list_init(skb);
701
702 __skb_pull(skb, hlen);
703 fh = __skb_push(skb, sizeof(struct frag_hdr));
704 __skb_push(skb, hlen);
705 skb_reset_network_header(skb);
706 memcpy(skb_network_header(skb), tmp_hdr, hlen);
707
708 fh->nexthdr = nexthdr;
709 fh->reserved = 0;
710 fh->frag_off = htons(IP6_MF);
711 fh->identification = frag_id;
712
713 first_len = skb_pagelen(skb);
714 skb->data_len = first_len - skb_headlen(skb);
715 skb->len = first_len;
716 ipv6_hdr(skb)->payload_len = htons(first_len -
717 sizeof(struct ipv6hdr));
718
719 for (;;) {
720 /* Prepare header of the next frame,
721 * before previous one went down. */
722 if (frag) {
723 frag->ip_summed = CHECKSUM_NONE;
724 skb_reset_transport_header(frag);
725 fh = __skb_push(frag, sizeof(struct frag_hdr));
726 __skb_push(frag, hlen);
727 skb_reset_network_header(frag);
728 memcpy(skb_network_header(frag), tmp_hdr,
729 hlen);
730 offset += skb->len - hlen - sizeof(struct frag_hdr);
731 fh->nexthdr = nexthdr;
732 fh->reserved = 0;
733 fh->frag_off = htons(offset);
734 if (frag->next)
735 fh->frag_off |= htons(IP6_MF);
736 fh->identification = frag_id;
737 ipv6_hdr(frag)->payload_len =
738 htons(frag->len -
739 sizeof(struct ipv6hdr));
740 ip6_copy_metadata(frag, skb);
741 }
742
743 err = output(net, sk, skb);
744 if (!err)
745 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
746 IPSTATS_MIB_FRAGCREATES);
747
748 if (err || !frag)
749 break;
750
751 skb = frag;
752 frag = skb->next;
753 skb_mark_not_on_list(skb);
754 }
755
756 kfree(tmp_hdr);
757
758 if (err == 0) {
759 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
760 IPSTATS_MIB_FRAGOKS);
761 return 0;
762 }
763
764 kfree_skb_list(frag);
849 for (;;) {
850 /* Prepare header of the next frame,
851 * before previous one went down. */
852 if (iter.frag)
853 ip6_fraglist_prepare(skb, &iter);
854
855 err = output(net, sk, skb);
856 if (!err)
857 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
858 IPSTATS_MIB_FRAGCREATES);
859
860 if (err || !iter.frag)
861 break;
862
863 skb = ip6_fraglist_next(&iter);
864 }
865
866 kfree(iter.tmp_hdr);
867
868 if (err == 0) {
869 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
870 IPSTATS_MIB_FRAGOKS);
871 return 0;
872 }
873
874 kfree_skb_list(iter.frag_list);
875
876 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
877 IPSTATS_MIB_FRAGFAILS);
878 return err;
879
880slow_path_clean:
881 skb_walk_frags(skb, frag2) {
882 if (frag2 == frag)
883 break;
884 frag2->sk = NULL;
885 frag2->destructor = NULL;
886 skb->truesize += frag2->truesize;
887 }
888 }
889
890slow_path:
765
766 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
767 IPSTATS_MIB_FRAGFAILS);
768 return err;
769
770slow_path_clean:
771 skb_walk_frags(skb, frag2) {
772 if (frag2 == frag)
773 break;
774 frag2->sk = NULL;
775 frag2->destructor = NULL;
776 skb->truesize += frag2->truesize;
777 }
778 }
779
780slow_path:
781 left = skb->len - hlen; /* Space per frame */
782 ptr = hlen; /* Where to start from */
783
784 /*
785 * Fragment the datagram.
786 */
787
788 troom = rt->dst.dev->needed_tailroom;
789
790 /*
791 * Keep copying data until we run out.
792 */
793 while (left > 0) {
794 u8 *fragnexthdr_offset;
795
891 /*
892 * Fragment the datagram.
893 */
894
895 ip6_frag_init(skb, hlen, mtu, rt->dst.dev->needed_tailroom,
896 LL_RESERVED_SPACE(rt->dst.dev), prevhdr, nexthdr, frag_id,
897 &state);
898
899 /*
900 * Keep copying data until we run out.
901 */
902
903 while (state.left > 0) {
904 frag = ip6_frag_next(skb, &state);
905 if (IS_ERR(frag)) {
906 err = PTR_ERR(frag);
907 goto fail;
908 }
909
910 /*
796 len = left;
797 /* IF: it doesn't fit, use 'mtu' - the data space left */
798 if (len > mtu)
799 len = mtu;
800 /* IF: we are not sending up to and including the packet end
801 then align the next start on an eight byte boundary */
802 if (len < left) {
803 len &= ~7;
804 }
805
806 /* Allocate buffer */
807 frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
808 hroom + troom, GFP_ATOMIC);
809 if (!frag) {
810 err = -ENOMEM;
811 goto fail;
812 }
813
814 /*
815 * Set up data on packet
816 */
817
818 ip6_copy_metadata(frag, skb);
819 skb_reserve(frag, hroom);
820 skb_put(frag, len + hlen + sizeof(struct frag_hdr));
821 skb_reset_network_header(frag);
822 fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
823 frag->transport_header = (frag->network_header + hlen +
824 sizeof(struct frag_hdr));
825
826 /*
827 * Charge the memory for the fragment to any owner
828 * it might possess
829 */
830 if (skb->sk)
831 skb_set_owner_w(frag, skb->sk);
832
833 /*
834 * Copy the packet header into the new buffer.
835 */
836 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
837
838 fragnexthdr_offset = skb_network_header(frag);
839 fragnexthdr_offset += prevhdr - skb_network_header(skb);
840 *fragnexthdr_offset = NEXTHDR_FRAGMENT;
841
842 /*
843 * Build fragment header.
844 */
845 fh->nexthdr = nexthdr;
846 fh->reserved = 0;
847 fh->identification = frag_id;
848
849 /*
850 * Copy a block of the IP datagram.
851 */
852 BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
853 len));
854 left -= len;
855
856 fh->frag_off = htons(offset);
857 if (left > 0)
858 fh->frag_off |= htons(IP6_MF);
859 ipv6_hdr(frag)->payload_len = htons(frag->len -
860 sizeof(struct ipv6hdr));
861
862 ptr += len;
863 offset += len;
864
865 /*
911 * Put this fragment into the sending queue.
912 */
913 err = output(net, sk, frag);
914 if (err)
915 goto fail;
916
917 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
918 IPSTATS_MIB_FRAGCREATES);

--- 404 unchanged lines hidden ---

1323 int err;
1324 int offset = 0;
1325 u32 tskey = 0;
1326 struct rt6_info *rt = (struct rt6_info *)cork->dst;
1327 struct ipv6_txoptions *opt = v6_cork->opt;
1328 int csummode = CHECKSUM_NONE;
1329 unsigned int maxnonfragsize, headersize;
1330 unsigned int wmem_alloc_delta = 0;
866 * Put this fragment into the sending queue.
867 */
868 err = output(net, sk, frag);
869 if (err)
870 goto fail;
871
872 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
873 IPSTATS_MIB_FRAGCREATES);

--- 404 unchanged lines hidden ---

1278 int err;
1279 int offset = 0;
1280 u32 tskey = 0;
1281 struct rt6_info *rt = (struct rt6_info *)cork->dst;
1282 struct ipv6_txoptions *opt = v6_cork->opt;
1283 int csummode = CHECKSUM_NONE;
1284 unsigned int maxnonfragsize, headersize;
1285 unsigned int wmem_alloc_delta = 0;
1286 bool paged, extra_uref;
1331 bool paged, extra_uref = false;
1332
1333 skb = skb_peek_tail(queue);
1334 if (!skb) {
1335 exthdrlen = opt ? opt->opt_flen : 0;
1336 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1337 }
1338
1339 paged = !!cork->gso_size;

--- 52 unchanged lines hidden ---

1392 (!(flags & MSG_MORE) || cork->gso_size) &&
1393 rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1394 csummode = CHECKSUM_PARTIAL;
1395
1396 if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
1397 uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
1398 if (!uarg)
1399 return -ENOBUFS;
1287
1288 skb = skb_peek_tail(queue);
1289 if (!skb) {
1290 exthdrlen = opt ? opt->opt_flen : 0;
1291 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1292 }
1293
1294 paged = !!cork->gso_size;

--- 52 unchanged lines hidden ---

1347 (!(flags & MSG_MORE) || cork->gso_size) &&
1348 rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1349 csummode = CHECKSUM_PARTIAL;
1350
1351 if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
1352 uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
1353 if (!uarg)
1354 return -ENOBUFS;
1355 extra_uref = true;
1400 extra_uref = !skb; /* only extra ref if !MSG_MORE */
1401 if (rt->dst.dev->features & NETIF_F_SG &&
1402 csummode == CHECKSUM_PARTIAL) {
1403 paged = true;
1404 } else {
1405 uarg->zerocopy = 0;
1406 skb_zcopy_set(skb, uarg, &extra_uref);
1407 }
1408 }

--- 469 unchanged lines hidden ---
1356 if (rt->dst.dev->features & NETIF_F_SG &&
1357 csummode == CHECKSUM_PARTIAL) {
1358 paged = true;
1359 } else {
1360 uarg->zerocopy = 0;
1361 skb_zcopy_set(skb, uarg, &extra_uref);
1362 }
1363 }

--- 469 unchanged lines hidden ---
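The final hunks above are the zerocopy setup in the ip6_append_data() path: when the socket has SOCK_ZEROCOPY set and the caller passes MSG_ZEROCOPY, the kernel sets up a ubuf_info via sock_zerocopy_realloc() so it can send from the user pages directly and signal completion later on the socket's error queue, instead of copying the payload. From userspace the feature is opted into per socket and per call, roughly as follows (a minimal sketch with error handling trimmed; the #ifndef fallbacks carry the kernel's constant values for older libc headers):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

int main(void)
{
	static char buf[16 * 1024];
	struct sockaddr_in6 dst = {
		.sin6_family = AF_INET6,
		.sin6_addr   = IN6ADDR_LOOPBACK_INIT,
		.sin6_port   = htons(9),	/* discard port, for illustration only */
	};
	int one = 1;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	/* Per-socket opt-in; sets SOCK_ZEROCOPY on the kernel side. */
	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));

	/* Per-call opt-in. Completion notifications arrive on the error queue
	 * (recvmsg() with MSG_ERRQUEUE) once the pinned pages may be reused. */
	if (sendto(fd, buf, sizeof(buf), MSG_ZEROCOPY,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	return 0;
}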