// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/tcp.h>
#include <net/protocol.h>

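/* Walk the segment list and move the software timestamp request from
 * the original GSO skb onto the one segment whose payload covers
 * sequence number @ts_seq, so exactly one segment carries the request.
 */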
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

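/* Check that the skb really is TCPv4 GSO and that the full TCP header
 * is present, fix up the pseudo-header checksum if the caller has not
 * done so, then hand off to the protocol-independent tcp_gso_segment().
 */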
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo-header; normally the stack
		 * has already done this.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

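/* Split a TCP super-packet into mss-sized segments.  skb_segment()
 * does the generic splitting; everything after it is the TCP-specific
 * fixup of each segment: sequence numbers, FIN/PSH/CWR flags,
 * checksums, socket ownership for TCP Small Queues, and optional TX
 * timestamps.
 */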
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only require splitting
	 * the frame into an MSS multiple and possibly a remainder; in
	 * both cases the result is still a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

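	/* Incrementally update the pseudo-header checksum, RFC 1624 style:
	 * oldlen is the one's complement of the original TCP length
	 * (header plus full payload), so adding (oldlen + thlen + mss)
	 * to th->check folds the old length out of the sum and each
	 * segment's length in, avoiding a full recompute per segment.
	 */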
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack runs when the last frag is freed
	 * at TX completion, not right now when gso_skb is freed by the
	 * GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative, so
		 * we must use either refcount_add() or refcount_sub_and_test().
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

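	/* The last segment may be shorter than mss, so redo the checksum
	 * adjustment with its actual length (linear tail plus paged data)
	 * instead of the mss used for the full segments above.
	 */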
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

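/* Core GRO matcher for TCP: among held packets the lower layers have
 * already marked same_flow, find one with matching TCP ports, then use
 * the flush bits to decide whether this segment can be coalesced into
 * it or whether the flow has to be flushed up the stack.
 */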
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

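		/* source and dest are adjacent 16-bit fields in the TCP
		 * header, so a single 32-bit XOR compares both ports at once.
		 */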
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed
	 * ID or whether we use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

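/* Finish GRO for a coalesced TCP skb: point the checksum fields at the
 * TCP header, mark it CHECKSUM_PARTIAL, and record in gso_segs how many
 * frames were merged so the skb can be resegmented on output.
 */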
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

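/* IPv4 entry point for TCP GRO receive: validate the TCP checksum
 * first (unless the packet is already marked for flushing), then hand
 * off to the protocol-independent tcp_gro_receive().
 */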
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

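/* IPv4 completion for TCP GRO: seed th->check with the pseudo-header
 * checksum for the coalesced length and mark the skb as TCPv4 GSO,
 * with a fixed IP ID if the flow stayed atomic, so it can later be
 * resegmented if needed.
 */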
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

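/* Register the callbacks above with the inet offload layer, keyed by
 * IP protocol number, so the GSO and GRO engines can find them.
 * tcpv4_offload_init() runs once at boot during IPv4 initialization.
 */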
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}