net/ipv4/tcp_offload.c (openbmc/linux, revision d2999e1b)
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

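	/* Save the 16-bit ones' complement of the original length: adding
	 * it to a new length yields the incremental checksum adjustment
	 * (RFC 1624 style) that is folded into th->check below.
	 */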
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = tcp_skb_mss(skb);
	if (unlikely(skb->len <= mss))
		goto out;

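	/* If the device (or a later GSO pass) can segment this packet
	 * itself, skip software segmentation: only sanity-check gso_type
	 * and recompute gso_segs for packets from untrusted sources.
	 */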
	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_GRE_CSUM |
			       SKB_GSO_IPIP |
			       SKB_GSO_SIT |
			       SKB_GSO_MPLS |
			       SKB_GSO_UDP_TUNNEL |
			       SKB_GSO_UDP_TUNNEL_CSUM |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only the first segment may have ooo_okay set */
	segs->ooo_okay = ooo_okay;

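	/* Incremental checksum update: delta combines the ones' complement
	 * of the old length with the new per-segment length (thlen + mss).
	 * skb_segment() copied the original TCP header to every segment,
	 * so one adjusted checksum serves all full-sized segments.
	 */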
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

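	/* Walk the segment list: clear FIN/PSH on all but the last segment
	 * (only the final one may carry them), apply the precomputed
	 * checksum, and advance each successor's sequence number by mss.
	 */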
	do {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack is invoked when the last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by the GSO engine.
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}

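	/* The last segment may be shorter than mss, so recompute its
	 * checksum delta from its actual header-plus-payload length.
	 */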
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

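		/* source and dest are adjacent 16-bit port fields, so a
		 * single 32-bit load compares both at once.
		 */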
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
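	/* TCP options must match exactly for segments to be merged;
	 * compare them 32 bits at a time.
	 */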
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = tcp_skb_mss(p);

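	/* Flush if the new segment is larger than the flow's MSS, or if
	 * it is not the in-order successor of the held packet.
	 */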
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

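	/* If we cannot merge, the held packet gets flushed below, but
	 * mss = 1 keeps the undersize test in out_check_final from also
	 * marking the new skb for an immediate flush.
	 */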
	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
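	/* A sub-MSS segment is likely the last of a burst, and segments
	 * carrying URG/PSH/RST/SYN/FIN must be delivered without delay.
	 */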
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

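	/* The merged packet's TCP checksum no longer covers its new
	 * length; mark it CHECKSUM_PARTIAL, with csum_start/csum_offset
	 * pointing at the TCP checksum field so it can be completed later.
	 */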
	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

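	/* Seed th->check with the pseudo-header checksum; the payload
	 * part is filled in per segment during GSO or by the hardware.
	 */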
	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Use the IP hdr immediately preceding this transport */
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip_csum;

	wsum = NAPI_GRO_CB(skb)->csum;

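	/* Verify the TCP checksum before aggregating: with CHECKSUM_COMPLETE
	 * the device has already summed the payload; with CHECKSUM_NONE we
	 * must compute the sum over the GRO region here first.
	 */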
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
				    0);

		/* fall through */

	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  wsum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

skip_csum:
	return tcp_gro_receive(head, skb);
}

static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

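	/* Precompute the pseudo-header checksum for the merged length;
	 * tcp_gro_complete() marks the skb CHECKSUM_PARTIAL so the payload
	 * checksum is finished later.
	 */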
	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_send_check	=	tcp_v4_gso_send_check,
		.gso_segment	=	tcp_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}