xref: /openbmc/linux/net/ipv4/tcp_offload.c (revision c000c4f1)
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

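/* Walk the segment list and mark the segment whose sequence range covers
 * ts_seq with a software timestamp request, copying ts_seq into that
 * segment's tskey so the timestamp is reported for the right byte.
 */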
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

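/* IPv4 entry point for TCP GSO: check that the skb really is TCPv4 GSO,
 * make sure the TCP header is in the linear area, and fix up the
 * pseudo-header checksum if needed before handing off to the
 * protocol-independent tcp_gso_segment().
 */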
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

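/* Core TCP GSO: split a large TCP skb into MSS-sized segments, then fix
 * up sequence numbers, flags (FIN/PSH kept only on the last segment, CWR
 * only on the first) and checksums on each resulting segment.
 */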
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

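	/* ~skb->len folded to 16 bits is the "old length" term used below
	 * for the incremental checksum adjustment when the per-segment
	 * length replaces the original total length.
	 */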
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

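	/* Fix up every segment except the last: clear FIN/PSH, apply the
	 * precomputed checksum, advance the sequence number by one MSS and,
	 * when requested, carry the tcp_wfree destructor and owning socket
	 * over so write-memory accounting follows the segments.
	 */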
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called when the last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

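	/* The last segment keeps FIN/PSH and may be shorter than one MSS, so
	 * recompute its checksum adjustment from its actual payload length.
	 */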
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

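/* Protocol-independent TCP GRO receive: match the incoming skb against
 * packets already held on the GRO list and merge it when the flow, TCP
 * options, flags and sequence numbers allow, otherwise flag a flush.
 */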
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

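	/* Look for a held packet of the same flow; source and destination
	 * ports are adjacent in the header, so one 32-bit compare covers both.
	 */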
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on whether
	 * we continue this flow as an atomic flow with a fixed ID or if we
	 * use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

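/* Finish a merged GRO packet: point csum_start/csum_offset at the TCP
 * checksum field for CHECKSUM_PARTIAL, record how many frames were
 * coalesced in gso_segs, and mark the packet with SKB_GSO_TCP_ECN when
 * CWR is set.
 */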
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

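/* IPv4 GRO receive hook: validate the TCP checksum against the IPv4
 * pseudo-header (unless the packet is already marked for flush) before
 * calling the common tcp_gro_receive().
 */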
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

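/* IPv4 GRO complete hook: seed th->check with the pseudo-header checksum
 * for the merged length and set the GSO type so the packet can later be
 * resegmented (SKB_GSO_TCPV4, plus SKB_GSO_TCP_FIXEDID for flows whose
 * IP ID did not increment).
 */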
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

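/* Offload callbacks registered for IPPROTO_TCP; inet_add_offload() hooks
 * them into the IPv4 GSO/GRO paths at boot via tcpv4_offload_init().
 */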
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}