xref: /openbmc/linux/net/ipv4/tcp_offload.c (revision 6772c486)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

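/* Walk the segment list and transfer the software TX timestamp request
 * from the original GSO skb to the segment that contains ts_seq, i.e.
 * the byte the user asked to have timestamped.
 */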
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

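/* IPv4 entry point for TCP segmentation: validate the GSO type and
 * header, make sure the pseudo-header checksum is set up, then hand
 * off to the protocol-independent tcp_gso_segment().
 */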
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo header; we usually expect the
		 * stack to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

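/* Split a TCP GSO skb into individual segments: let skb_segment() do
 * the actual split, then fix up sequence numbers, flags, checksums and
 * skb ownership on every resulting segment.
 */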
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

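	/* Save ~len before pulling the TCP header: the one's-complement
	 * arithmetic below uses it to patch the checksums as the length
	 * changes from the full skb->len to one mss per segment.
	 */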
	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

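	/* Checksum delta from the original TCP length to the per-segment
	 * length (thlen + mss), in one's-complement form.
	 */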
	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

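	/* The pseudo-header sum only changes with the length, so adjust
	 * the original check value once and reuse it as the seed for
	 * every full-mss segment (all but the last).
	 */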
	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called at the time the
	 * last frag is freed at TX completion, and not right now when
	 * gso_skb is freed by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

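	/* The last segment may be shorter than a full mss, so derive its
	 * checksum delta from the segment's actual length instead.
	 */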
	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

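/* Try to merge an incoming TCP segment into a flow already held on the
 * GRO list.  Returns the held packet that must be flushed to the stack,
 * or NULL if the segment was merged or starts a new flow.
 */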
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		goto out;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

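		/* th->source and th->dest are adjacent 16-bit fields, so
		 * one 32-bit compare covers both ports.
		 */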
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
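	/* TCP options must match exactly for the segments to be merged. */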
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on if we
	 * continue this flow as an atomic flow with a fixed ID or if we use
	 * an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches the prior
	 * packet's mss.  If it is a single frame, do not aggregate it if its
	 * length is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

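/* Finalize a merged GRO packet before it is passed up the stack:
 * restore the CHECKSUM_PARTIAL offsets and fill in the GSO metadata
 * that lets the skb be resegmented later if necessary.
 */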
void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;
}
EXPORT_SYMBOL(tcp_gro_complete);

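/* IPv4 GRO entry point: validate the TCP checksum against the IPv4
 * pseudo header, then defer to the generic tcp_gro_receive().
 */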
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

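/* IPv4 GRO completion: rebuild the pseudo-header checksum for the
 * merged packet and mark it as TCPv4 GSO before tcp_gro_complete().
 */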
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	tcp_gro_complete(skb);
	return 0;
}

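/* GSO/GRO callbacks registered for IPPROTO_TCP at boot. */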
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}