/*
 * QEMU RX packets abstractions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/crc32c.h"
#include "trace.h"
#include "net_rx_pkt.h"
#include "net/checksum.h"
#include "net/tap.h"

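/*
 * NetRxPkt keeps everything a device model needs about one received frame:
 * the virtio-net header, an optionally stripped Ethernet/VLAN header, the
 * payload as an iovec, and the results of L3/L4 protocol analysis.
 *
 * Illustrative call sequence (a sketch only; identifiers such as rxp, iov,
 * iovcnt and strip_vlan are placeholders, and real devices add their own
 * descriptor handling around these calls):
 *
 *     struct NetRxPkt *rxp;
 *     bool csum_ok;
 *
 *     net_rx_pkt_init(&rxp);
 *     ...
 *     net_rx_pkt_attach_iovec(rxp, iov, iovcnt, 0, strip_vlan);
 *     if (net_rx_pkt_validate_l4_csum(rxp, &csum_ok)) {
 *         ... report csum_ok to the guest ...
 *     }
 *     ... copy net_rx_pkt_get_iovec(rxp) into guest RX buffers ...
 *     net_rx_pkt_uninit(rxp);
 */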
struct NetRxPkt {
    struct virtio_net_hdr virt_hdr;
    struct {
        struct eth_header eth;
        struct vlan_header vlan;
    } ehdr_buf;
    struct iovec *vec;
    uint16_t vec_len_total;
    uint16_t vec_len;
    uint32_t tot_len;
    uint16_t tci;
    size_t ehdr_buf_len;
    eth_pkt_types_e packet_type;

    /* Analysis results */
    bool hasip4;
    bool hasip6;

    size_t l3hdr_off;
    size_t l4hdr_off;
    size_t l5hdr_off;

    eth_ip6_hdr_info ip6hdr_info;
    eth_ip4_hdr_info ip4hdr_info;
    eth_l4_hdr_info l4hdr_info;
};

void net_rx_pkt_init(struct NetRxPkt **pkt)
{
    struct NetRxPkt *p = g_malloc0(sizeof *p);
    p->vec = NULL;
    p->vec_len_total = 0;
    *pkt = p;
}

void net_rx_pkt_uninit(struct NetRxPkt *pkt)
{
    if (pkt->vec_len_total != 0) {
        g_free(pkt->vec);
    }

    g_free(pkt);
}

struct virtio_net_hdr *net_rx_pkt_get_vhdr(struct NetRxPkt *pkt)
{
    assert(pkt);
    return &pkt->virt_hdr;
}

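/*
 * Grow pkt->vec to hold at least new_iov_len entries.  The old contents are
 * not preserved (g_free()/g_malloc(), not a realloc); callers always rewrite
 * the whole vector afterwards.
 */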
static inline void
net_rx_pkt_iovec_realloc(struct NetRxPkt *pkt,
                         int new_iov_len)
{
    if (pkt->vec_len_total < new_iov_len) {
        g_free(pkt->vec);
        pkt->vec = g_malloc(sizeof(*pkt->vec) * new_iov_len);
        pkt->vec_len_total = new_iov_len;
    }
}

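/*
 * Copy the payload iovec (starting at ploff) into pkt->vec, prepending the
 * saved Ethernet/VLAN header when a tag was stripped, then parse the L3/L4
 * headers so later queries (offsets, checksums, RSS) can use the results.
 */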
static void
net_rx_pkt_pull_data(struct NetRxPkt *pkt,
                     const struct iovec *iov, int iovcnt,
                     size_t ploff)
{
    uint32_t pllen = iov_size(iov, iovcnt) - ploff;

    if (pkt->ehdr_buf_len) {
        net_rx_pkt_iovec_realloc(pkt, iovcnt + 1);

        pkt->vec[0].iov_base = &pkt->ehdr_buf;
        pkt->vec[0].iov_len = pkt->ehdr_buf_len;

        pkt->tot_len = pllen + pkt->ehdr_buf_len;
        pkt->vec_len = iov_copy(pkt->vec + 1, pkt->vec_len_total - 1,
                                iov, iovcnt, ploff, pllen) + 1;
    } else {
        net_rx_pkt_iovec_realloc(pkt, iovcnt);

        pkt->tot_len = pllen;
        pkt->vec_len = iov_copy(pkt->vec, pkt->vec_len_total,
                                iov, iovcnt, ploff, pkt->tot_len);
    }

    eth_get_protocols(pkt->vec, pkt->vec_len, 0, &pkt->hasip4, &pkt->hasip6,
                      &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off,
                      &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info);

    trace_net_rx_pkt_parsed(pkt->hasip4, pkt->hasip6, pkt->l4hdr_info.proto,
                            pkt->l3hdr_off, pkt->l4hdr_off, pkt->l5hdr_off);
}

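/*
 * Attach packet data to pkt.  When strip_vlan is set, the outer VLAN tag is
 * removed from the copied data; the untagged Ethernet header is saved in
 * ehdr_buf and the TCI in pkt->tci, typically so the caller can still report
 * the tag to the guest.
 */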
void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt,
                             const struct iovec *iov, int iovcnt,
                             size_t iovoff, bool strip_vlan)
{
    uint16_t tci = 0;
    uint16_t ploff = iovoff;
    assert(pkt);

    if (strip_vlan) {
        pkt->ehdr_buf_len = eth_strip_vlan(iov, iovcnt, iovoff, &pkt->ehdr_buf,
                                           &ploff, &tci);
    } else {
        pkt->ehdr_buf_len = 0;
    }

    pkt->tci = tci;

    net_rx_pkt_pull_data(pkt, iov, iovcnt, ploff);
}

void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt,
                                const struct iovec *iov, int iovcnt,
                                size_t iovoff, int strip_vlan_index,
                                uint16_t vet, uint16_t vet_ext)
{
    uint16_t tci = 0;
    uint16_t ploff = iovoff;
    assert(pkt);

    pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff,
                                          strip_vlan_index, vet, vet_ext,
                                          &pkt->ehdr_buf,
                                          &ploff, &tci);

    pkt->tci = tci;

    net_rx_pkt_pull_data(pkt, iov, iovcnt, ploff);
}

void net_rx_pkt_dump(struct NetRxPkt *pkt)
{
#ifdef NET_RX_PKT_DEBUG
    assert(pkt);

    printf("RX PKT: tot_len: %u, ehdr_buf_len: %zu, vlan_tag: %u\n",
           pkt->tot_len, pkt->ehdr_buf_len, pkt->tci);
#endif
}

void net_rx_pkt_set_packet_type(struct NetRxPkt *pkt,
                                eth_pkt_types_e packet_type)
{
    assert(pkt);

    pkt->packet_type = packet_type;
}

eth_pkt_types_e net_rx_pkt_get_packet_type(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->packet_type;
}

size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->tot_len;
}

void net_rx_pkt_set_protocols(struct NetRxPkt *pkt,
                              const struct iovec *iov, size_t iovcnt,
                              size_t iovoff)
{
    assert(pkt);

    eth_get_protocols(iov, iovcnt, iovoff, &pkt->hasip4, &pkt->hasip6,
                      &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off,
                      &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info);
}

void net_rx_pkt_get_protocols(struct NetRxPkt *pkt,
                              bool *hasip4, bool *hasip6,
                              EthL4HdrProto *l4hdr_proto)
{
    assert(pkt);

    *hasip4 = pkt->hasip4;
    *hasip6 = pkt->hasip6;
    *l4hdr_proto = pkt->l4hdr_info.proto;
}

size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt)
{
    assert(pkt);
    return pkt->l3hdr_off;
}

size_t net_rx_pkt_get_l4_hdr_offset(struct NetRxPkt *pkt)
{
    assert(pkt);
    return pkt->l4hdr_off;
}

size_t net_rx_pkt_get_l5_hdr_offset(struct NetRxPkt *pkt)
{
    assert(pkt);
    return pkt->l5hdr_off;
}

eth_ip6_hdr_info *net_rx_pkt_get_ip6_info(struct NetRxPkt *pkt)
{
    return &pkt->ip6hdr_info;
}

eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt)
{
    return &pkt->ip4hdr_info;
}

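/*
 * The helpers below assemble the Toeplitz hash input: selected address and
 * port fields are appended to rss_input in network byte order, exactly as
 * they appear in the parsed packet headers.
 */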
static inline void
_net_rx_rss_add_chunk(uint8_t *rss_input, size_t *bytes_written,
                      void *ptr, size_t size)
{
    memcpy(&rss_input[*bytes_written], ptr, size);
    trace_net_rx_pkt_rss_add_chunk(ptr, size, *bytes_written);
    *bytes_written += size;
}

static inline void
_net_rx_rss_prepare_ip4(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        size_t *bytes_written)
{
    struct ip_header *ip4_hdr = &pkt->ip4hdr_info.ip4_hdr;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &ip4_hdr->ip_src, sizeof(uint32_t));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &ip4_hdr->ip_dst, sizeof(uint32_t));
}

static inline void
_net_rx_rss_prepare_ip6(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        bool ipv6ex, size_t *bytes_written)
{
    eth_ip6_hdr_info *ip6info = &pkt->ip6hdr_info;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          (ipv6ex && ip6info->rss_ex_src_valid)
                          ? &ip6info->rss_ex_src
                          : &ip6info->ip6_hdr.ip6_src,
                          sizeof(struct in6_address));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          (ipv6ex && ip6info->rss_ex_dst_valid)
                          ? &ip6info->rss_ex_dst
                          : &ip6info->ip6_hdr.ip6_dst,
                          sizeof(struct in6_address));
}

static inline void
_net_rx_rss_prepare_tcp(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        size_t *bytes_written)
{
    struct tcp_header *tcphdr = &pkt->l4hdr_info.hdr.tcp;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &tcphdr->th_sport, sizeof(uint16_t));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &tcphdr->th_dport, sizeof(uint16_t));
}

static inline void
_net_rx_rss_prepare_udp(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        size_t *bytes_written)
{
    struct udp_header *udphdr = &pkt->l4hdr_info.hdr.udp;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &udphdr->uh_sport, sizeof(uint16_t));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &udphdr->uh_dport, sizeof(uint16_t));
}

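/*
 * Compute the Toeplitz RSS hash for the requested hash type.  rss_input is
 * sized for the largest case: two IPv6 addresses (2 * 16 bytes) plus TCP/UDP
 * source and destination ports (2 * 2 bytes) = 36 bytes.
 */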
uint32_t
net_rx_pkt_calc_rss_hash(struct NetRxPkt *pkt,
                         NetRxPktRssType type,
                         uint8_t *key)
{
    uint8_t rss_input[36];
    size_t rss_length = 0;
    uint32_t rss_hash = 0;
    net_toeplitz_key key_data;

    switch (type) {
    case NetPktRssIpV4:
        assert(pkt->hasip4);
        trace_net_rx_pkt_rss_ip4();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV4Tcp:
        assert(pkt->hasip4);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
        trace_net_rx_pkt_rss_ip4_tcp();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6Tcp:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
        trace_net_rx_pkt_rss_ip6_tcp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6:
        assert(pkt->hasip6);
        trace_net_rx_pkt_rss_ip6();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        break;
    case NetPktRssIpV6Ex:
        assert(pkt->hasip6);
        trace_net_rx_pkt_rss_ip6_ex();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        break;
    case NetPktRssIpV6TcpEx:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
        trace_net_rx_pkt_rss_ip6_ex_tcp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV4Udp:
        assert(pkt->hasip4);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
        trace_net_rx_pkt_rss_ip4_udp();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6Udp:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
        trace_net_rx_pkt_rss_ip6_udp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6UdpEx:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
        trace_net_rx_pkt_rss_ip6_ex_udp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    default:
        assert(false);
        break;
    }

    net_toeplitz_key_init(&key_data, key);
    net_toeplitz_add(&rss_hash, rss_input, rss_length, &key_data);

    trace_net_rx_pkt_rss_hash(rss_length, rss_hash);

    return rss_hash;
}

uint16_t net_rx_pkt_get_ip_id(struct NetRxPkt *pkt)
{
    assert(pkt);

    if (pkt->hasip4) {
        return be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_id);
    }

    return 0;
}

bool net_rx_pkt_is_tcp_ack(struct NetRxPkt *pkt)
{
    assert(pkt);

    if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP) {
        return TCP_HEADER_FLAGS(&pkt->l4hdr_info.hdr.tcp) & TCP_FLAG_ACK;
    }

    return false;
}

bool net_rx_pkt_has_tcp_data(struct NetRxPkt *pkt)
{
    assert(pkt);

    if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP) {
        return pkt->l4hdr_info.has_tcp_data;
    }

    return false;
}

struct iovec *net_rx_pkt_get_iovec(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->vec;
}

uint16_t net_rx_pkt_get_iovec_len(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->vec_len;
}

void net_rx_pkt_set_vhdr(struct NetRxPkt *pkt,
                         struct virtio_net_hdr *vhdr)
{
    assert(pkt);

    memcpy(&pkt->virt_hdr, vhdr, sizeof pkt->virt_hdr);
}

void net_rx_pkt_set_vhdr_iovec(struct NetRxPkt *pkt,
                               const struct iovec *iov, int iovcnt)
{
    assert(pkt);

    iov_to_buf(iov, iovcnt, 0, &pkt->virt_hdr, sizeof pkt->virt_hdr);
}

void net_rx_pkt_unset_vhdr(struct NetRxPkt *pkt)
{
    assert(pkt);

    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));
}

bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->ehdr_buf_len ? true : false;
}

uint16_t net_rx_pkt_get_vlan_tag(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->tci;
}

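/*
 * Validate the IPv4 header checksum.  Returns false (validation not
 * applicable) for non-IPv4 packets; otherwise sets *csum_valid and
 * returns true.
 */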
bool net_rx_pkt_validate_l3_csum(struct NetRxPkt *pkt, bool *csum_valid)
{
    uint32_t cntr;
    uint16_t csum;
    uint32_t csl;

    trace_net_rx_pkt_l3_csum_validate_entry();

    if (!pkt->hasip4) {
        trace_net_rx_pkt_l3_csum_validate_not_ip4();
        return false;
    }

    csl = pkt->l4hdr_off - pkt->l3hdr_off;

    cntr = net_checksum_add_iov(pkt->vec, pkt->vec_len,
                                pkt->l3hdr_off,
                                csl, 0);

    csum = net_checksum_finish(cntr);

    *csum_valid = (csum == 0);

    trace_net_rx_pkt_l3_csum_validate_csum(pkt->l3hdr_off, csl,
                                           cntr, csum, *csum_valid);

    return true;
}

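/*
 * Compute the TCP/UDP checksum over the L4 data plus the IPv4 or IPv6 pseudo
 * header.  The checksummed length is taken from the UDP length field for
 * UDP, or derived from the IP header lengths for TCP.
 */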
static uint16_t
_net_rx_pkt_calc_l4_csum(struct NetRxPkt *pkt)
{
    uint32_t cntr;
    uint16_t csum;
    uint16_t csl;
    uint32_t cso;

    trace_net_rx_pkt_l4_csum_calc_entry();

    if (pkt->hasip4) {
        if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP) {
            csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen);
            trace_net_rx_pkt_l4_csum_calc_ip4_udp();
        } else {
            csl = be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_len) -
                  IP_HDR_GET_LEN(&pkt->ip4hdr_info.ip4_hdr);
            trace_net_rx_pkt_l4_csum_calc_ip4_tcp();
        }

        cntr = eth_calc_ip4_pseudo_hdr_csum(&pkt->ip4hdr_info.ip4_hdr,
                                            csl, &cso);
        trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl);
    } else {
        if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP) {
            csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen);
            trace_net_rx_pkt_l4_csum_calc_ip6_udp();
        } else {
            struct ip6_header *ip6hdr = &pkt->ip6hdr_info.ip6_hdr;
            size_t full_ip6hdr_len = pkt->l4hdr_off - pkt->l3hdr_off;
            size_t ip6opts_len = full_ip6hdr_len - sizeof(struct ip6_header);

            csl = be16_to_cpu(ip6hdr->ip6_ctlun.ip6_un1.ip6_un1_plen) -
                  ip6opts_len;
            trace_net_rx_pkt_l4_csum_calc_ip6_tcp();
        }

        cntr = eth_calc_ip6_pseudo_hdr_csum(&pkt->ip6hdr_info.ip6_hdr, csl,
                                            pkt->ip6hdr_info.l4proto, &cso);
        trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl);
    }

    cntr += net_checksum_add_iov(pkt->vec, pkt->vec_len,
                                 pkt->l4hdr_off, csl, cso);

    csum = net_checksum_finish_nozero(cntr);

    trace_net_rx_pkt_l4_csum_calc_csum(pkt->l4hdr_off, csl, cntr, csum);

    return csum;
}

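/*
 * Validate the SCTP CRC32c.  The checksum field (offset 8 of the SCTP common
 * header) is zeroed in place, the CRC32c is computed over the rest of the
 * packet, compared against the original value (stored little-endian), and
 * the original value is then written back.
 */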
static bool
_net_rx_pkt_validate_sctp_sum(struct NetRxPkt *pkt)
{
    size_t csum_off;
    size_t off = pkt->l4hdr_off;
    size_t vec_len = pkt->vec_len;
    struct iovec *vec;
    uint32_t calculated = 0;
    uint32_t original;
    bool valid;

    for (vec = pkt->vec; vec->iov_len < off; vec++) {
        off -= vec->iov_len;
        vec_len--;
    }

    csum_off = off + 8;

    if (!iov_to_buf(vec, vec_len, csum_off, &original, sizeof(original))) {
        return false;
    }

    if (!iov_from_buf(vec, vec_len, csum_off,
                      &calculated, sizeof(calculated))) {
        return false;
    }

    calculated = crc32c(0xffffffff,
                        (uint8_t *)vec->iov_base + off, vec->iov_len - off);
    calculated = iov_crc32c(calculated ^ 0xffffffff, vec + 1, vec_len - 1);
    valid = calculated == le32_to_cpu(original);
    iov_from_buf(vec, vec_len, csum_off, &original, sizeof(original));

    return valid;
}

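/*
 * Validate the L4 (TCP/UDP/SCTP) checksum.  Returns false when validation is
 * not applicable (IPv4 fragment, UDP with no checksum, or an unrecognised L4
 * protocol); otherwise sets *csum_valid and returns true.
 */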
bool net_rx_pkt_validate_l4_csum(struct NetRxPkt *pkt, bool *csum_valid)
{
    uint32_t csum;

    trace_net_rx_pkt_l4_csum_validate_entry();

    if (pkt->hasip4 && pkt->ip4hdr_info.fragment) {
        trace_net_rx_pkt_l4_csum_validate_ip4_fragment();
        return false;
    }

    switch (pkt->l4hdr_info.proto) {
    case ETH_L4_HDR_PROTO_UDP:
        if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) {
            trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum();
            return false;
        }
        /* fall through */
    case ETH_L4_HDR_PROTO_TCP:
        csum = _net_rx_pkt_calc_l4_csum(pkt);
        *csum_valid = ((csum == 0) || (csum == 0xFFFF));
        break;

    case ETH_L4_HDR_PROTO_SCTP:
        *csum_valid = _net_rx_pkt_validate_sctp_sum(pkt);
        break;

    default:
        trace_net_rx_pkt_l4_csum_validate_not_xxp();
        return false;
    }

    trace_net_rx_pkt_l4_csum_validate_csum(*csum_valid);

    return true;
}

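/*
 * Recompute the TCP/UDP checksum and store it back into the packet data.
 * Returns false when there is nothing to fix (IPv4 fragment, UDP with no
 * checksum, or an unrecognised L4 protocol).
 */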
bool net_rx_pkt_fix_l4_csum(struct NetRxPkt *pkt)
{
    uint16_t csum = 0;
    uint32_t l4_cso;

    trace_net_rx_pkt_l4_csum_fix_entry();

    switch (pkt->l4hdr_info.proto) {
    case ETH_L4_HDR_PROTO_TCP:
        l4_cso = offsetof(struct tcp_header, th_sum);
        trace_net_rx_pkt_l4_csum_fix_tcp(l4_cso);
        break;

    case ETH_L4_HDR_PROTO_UDP:
        if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) {
            trace_net_rx_pkt_l4_csum_fix_udp_with_no_checksum();
            return false;
        }
        l4_cso = offsetof(struct udp_header, uh_sum);
        trace_net_rx_pkt_l4_csum_fix_udp(l4_cso);
        break;

    default:
        trace_net_rx_pkt_l4_csum_fix_not_xxp();
        return false;
    }

    if (pkt->hasip4 && pkt->ip4hdr_info.fragment) {
        trace_net_rx_pkt_l4_csum_fix_ip4_fragment();
        return false;
    }

    /* Set zero to checksum word */
    iov_from_buf(pkt->vec, pkt->vec_len,
                 pkt->l4hdr_off + l4_cso,
                 &csum, sizeof(csum));

    /* Calculate L4 checksum */
    csum = cpu_to_be16(_net_rx_pkt_calc_l4_csum(pkt));

    /* Set calculated checksum to checksum word */
    iov_from_buf(pkt->vec, pkt->vec_len,
                 pkt->l4hdr_off + l4_cso,
                 &csum, sizeof(csum));

    trace_net_rx_pkt_l4_csum_fix_csum(pkt->l4hdr_off + l4_cso, csum);

    return true;
}