// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb,
			   unsigned int seq, unsigned int mss)
{
	u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP;
	u32 ts_seq = skb_shinfo(gso_skb)->tskey;

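	/* The timestamp request (tskey) names a byte in the stream; walk
	 * the segment list and move the request to the first segment
	 * whose range covers that sequence number.
	 */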
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= flags;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

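	/* skb->len is still the original TCP length (header + payload);
	 * save its ones' complement so that adding a new length to it
	 * later yields the checksum delta between old and new lengths.
	 */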
	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

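	/* oldlen already holds ~(original TCP length), so adding the new
	 * per-segment TCP length (thlen + mss) gives the checksum delta
	 * for every full-sized segment.
	 */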
	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP))
		tcp_gso_tstamp(segs, gso_skb, seq, mss);

	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

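	/* All but the last segment share the same checksum seed; FIN and
	 * PSH may stay set only on the final segment, CWR only on the
	 * first.
	 */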
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* Following permits TCP Small Queues to work well with GSO:
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

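	/* The last segment may be shorter than mss; recompute the checksum
	 * delta from its actual TCP length (header plus remaining payload).
	 */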
	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		goto out;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

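		/* Source and destination ports are adjacent u16s; compare
		 * both with a single 32-bit load.
		 */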
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the inner most IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
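	/* Any difference in the TCP options also forces a flush */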
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on if we
	 * continue this flow as an atomic flow with a fixed ID or if we use
	 * an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
	 * If it is a single frame, do not aggregate it if its length
	 * is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

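	/* Present the coalesced skb like a locally built GSO packet: leave
	 * the TCP checksum partial and derive gso_segs from the GRO count.
	 */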
	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;
}
EXPORT_SYMBOL(tcp_gro_complete);

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

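	/* Seed th->check with the pseudo-header checksum for the full
	 * coalesced length, as checksum offload and resegmentation expect.
	 */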
	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	tcp_gro_complete(skb);
	return 0;
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}