1 /*
2 * QEMU RX packets abstractions
3 *
4 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
5 *
6 * Developed by Daynix Computing LTD (http://www.daynix.com)
7 *
8 * Authors:
9 * Dmitry Fleytman <dmitry@daynix.com>
10 * Tamir Shomer <tamirs@daynix.com>
11 * Yan Vugenfirer <yan@daynix.com>
12 *
13 * This work is licensed under the terms of the GNU GPL, version 2 or later.
14 * See the COPYING file in the top-level directory.
15 *
16 */
17
18 #include "qemu/osdep.h"
19 #include "qemu/crc32c.h"
20 #include "trace.h"
21 #include "net_rx_pkt.h"
22 #include "net/checksum.h"
23 #include "net/tap.h"
24
/* Per-packet RX context: assembled data plus cached header-analysis results. */
struct NetRxPkt {
    struct virtio_net_hdr virt_hdr;  /* virtio-net metadata for the guest */
    struct {
        struct eth_header eth;
        struct vlan_header vlan;
    } ehdr_buf;                      /* saved Ethernet(+VLAN) header when a tag was stripped */
    struct iovec *vec;               /* scatter-gather view of the packet data */
    uint16_t vec_len_total;          /* allocated capacity of vec, in elements */
    uint16_t vec_len;                /* elements of vec currently in use */
    uint32_t tot_len;                /* total length incl. any re-attached header */
    uint16_t tci;                    /* VLAN TCI taken from a stripped tag */
    size_t ehdr_buf_len;             /* valid bytes in ehdr_buf; 0 = nothing stripped */
    eth_pkt_types_e packet_type;     /* unicast/multicast/broadcast classification */

    /* Analysis results */
    bool hasip4;                     /* packet carries an IPv4 header */
    bool hasip6;                     /* packet carries an IPv6 header */

    size_t l3hdr_off;                /* offset of the L3 (IP) header */
    size_t l4hdr_off;                /* offset of the L4 (TCP/UDP/SCTP) header */
    size_t l5hdr_off;                /* offset of the L4 payload */

    eth_ip6_hdr_info ip6hdr_info;
    eth_ip4_hdr_info ip4hdr_info;
    eth_l4_hdr_info l4hdr_info;
};
51
net_rx_pkt_init(struct NetRxPkt ** pkt)52 void net_rx_pkt_init(struct NetRxPkt **pkt)
53 {
54 struct NetRxPkt *p = g_malloc0(sizeof *p);
55 p->vec = NULL;
56 p->vec_len_total = 0;
57 *pkt = p;
58 }
59
net_rx_pkt_uninit(struct NetRxPkt * pkt)60 void net_rx_pkt_uninit(struct NetRxPkt *pkt)
61 {
62 if (pkt->vec_len_total != 0) {
63 g_free(pkt->vec);
64 }
65
66 g_free(pkt);
67 }
68
net_rx_pkt_get_vhdr(struct NetRxPkt * pkt)69 struct virtio_net_hdr *net_rx_pkt_get_vhdr(struct NetRxPkt *pkt)
70 {
71 assert(pkt);
72 return &pkt->virt_hdr;
73 }
74
75 static inline void
net_rx_pkt_iovec_realloc(struct NetRxPkt * pkt,int new_iov_len)76 net_rx_pkt_iovec_realloc(struct NetRxPkt *pkt,
77 int new_iov_len)
78 {
79 if (pkt->vec_len_total < new_iov_len) {
80 g_free(pkt->vec);
81 pkt->vec = g_malloc(sizeof(*pkt->vec) * new_iov_len);
82 pkt->vec_len_total = new_iov_len;
83 }
84 }
85
86 static void
net_rx_pkt_pull_data(struct NetRxPkt * pkt,const struct iovec * iov,int iovcnt,size_t ploff)87 net_rx_pkt_pull_data(struct NetRxPkt *pkt,
88 const struct iovec *iov, int iovcnt,
89 size_t ploff)
90 {
91 uint32_t pllen = iov_size(iov, iovcnt) - ploff;
92
93 if (pkt->ehdr_buf_len) {
94 net_rx_pkt_iovec_realloc(pkt, iovcnt + 1);
95
96 pkt->vec[0].iov_base = &pkt->ehdr_buf;
97 pkt->vec[0].iov_len = pkt->ehdr_buf_len;
98
99 pkt->tot_len = pllen + pkt->ehdr_buf_len;
100 pkt->vec_len = iov_copy(pkt->vec + 1, pkt->vec_len_total - 1,
101 iov, iovcnt, ploff, pllen) + 1;
102 } else {
103 net_rx_pkt_iovec_realloc(pkt, iovcnt);
104
105 pkt->tot_len = pllen;
106 pkt->vec_len = iov_copy(pkt->vec, pkt->vec_len_total,
107 iov, iovcnt, ploff, pkt->tot_len);
108 }
109
110 eth_get_protocols(pkt->vec, pkt->vec_len, 0, &pkt->hasip4, &pkt->hasip6,
111 &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off,
112 &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info);
113
114 trace_net_rx_pkt_parsed(pkt->hasip4, pkt->hasip6, pkt->l4hdr_info.proto,
115 pkt->l3hdr_off, pkt->l4hdr_off, pkt->l5hdr_off);
116 }
117
/*
 * Attach packet data, optionally stripping a single VLAN tag.
 * When a tag is stripped the TCI is cached in pkt->tci and the original
 * Ethernet header is stashed so it can be re-attached in front.
 */
void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt,
                             const struct iovec *iov, int iovcnt,
                             size_t iovoff, bool strip_vlan)
{
    uint16_t vlan_tci = 0;
    uint16_t payload_off = iovoff;

    assert(pkt);

    if (strip_vlan) {
        pkt->ehdr_buf_len = eth_strip_vlan(iov, iovcnt, iovoff, &pkt->ehdr_buf,
                                           &payload_off, &vlan_tci);
    } else {
        pkt->ehdr_buf_len = 0;
    }

    pkt->tci = vlan_tci;

    net_rx_pkt_pull_data(pkt, iov, iovcnt, payload_off);
}
137
/*
 * Attach packet data, stripping the VLAN tag selected by strip_vlan_index
 * with the given outer/inner ethertypes (vet/vet_ext).
 */
void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt,
                                const struct iovec *iov, int iovcnt,
                                size_t iovoff, int strip_vlan_index,
                                uint16_t vet, uint16_t vet_ext)
{
    uint16_t vlan_tci = 0;
    uint16_t payload_off = iovoff;

    assert(pkt);

    pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff,
                                          strip_vlan_index, vet, vet_ext,
                                          &pkt->ehdr_buf,
                                          &payload_off, &vlan_tci);

    pkt->tci = vlan_tci;

    net_rx_pkt_pull_data(pkt, iov, iovcnt, payload_off);
}
156
/*
 * Debug helper: print the packet's basic geometry.
 * Compiled in only when NET_RX_PKT_DEBUG is defined.
 */
void net_rx_pkt_dump(struct NetRxPkt *pkt)
{
#ifdef NET_RX_PKT_DEBUG
    assert(pkt);

    /*
     * Match the conversion specifiers to the field types: tot_len is
     * uint32_t and ehdr_buf_len is size_t.  The previous "%d"/"%lu" pair
     * was wrong on LLP64 hosts (e.g. win64), where size_t != unsigned long.
     */
    printf("RX PKT: tot_len: %" PRIu32 ", ehdr_buf_len: %zu, vlan_tag: %u\n",
           pkt->tot_len, pkt->ehdr_buf_len, pkt->tci);
#endif
}
166
/* Record the packet's classification (unicast/multicast/broadcast). */
void net_rx_pkt_set_packet_type(struct NetRxPkt *pkt,
                                eth_pkt_types_e packet_type)
{
    assert(pkt);

    pkt->packet_type = packet_type;
}
175
net_rx_pkt_get_packet_type(struct NetRxPkt * pkt)176 eth_pkt_types_e net_rx_pkt_get_packet_type(struct NetRxPkt *pkt)
177 {
178 assert(pkt);
179
180 return pkt->packet_type;
181 }
182
net_rx_pkt_get_total_len(struct NetRxPkt * pkt)183 size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt)
184 {
185 assert(pkt);
186
187 return pkt->tot_len;
188 }
189
net_rx_pkt_set_protocols(struct NetRxPkt * pkt,const struct iovec * iov,size_t iovcnt,size_t iovoff)190 void net_rx_pkt_set_protocols(struct NetRxPkt *pkt,
191 const struct iovec *iov, size_t iovcnt,
192 size_t iovoff)
193 {
194 assert(pkt);
195
196 eth_get_protocols(iov, iovcnt, iovoff, &pkt->hasip4, &pkt->hasip6,
197 &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off,
198 &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info);
199 }
200
/* Report cached analysis results: IPv4/IPv6 presence and the L4 protocol. */
void net_rx_pkt_get_protocols(struct NetRxPkt *pkt,
                              bool *hasip4, bool *hasip6,
                              EthL4HdrProto *l4hdr_proto)
{
    assert(pkt);

    *hasip4      = pkt->hasip4;
    *hasip6      = pkt->hasip6;
    *l4hdr_proto = pkt->l4hdr_info.proto;
}
211
net_rx_pkt_get_l4_hdr_offset(struct NetRxPkt * pkt)212 size_t net_rx_pkt_get_l4_hdr_offset(struct NetRxPkt *pkt)
213 {
214 assert(pkt);
215 return pkt->l4hdr_off;
216 }
217
net_rx_pkt_get_l5_hdr_offset(struct NetRxPkt * pkt)218 size_t net_rx_pkt_get_l5_hdr_offset(struct NetRxPkt *pkt)
219 {
220 assert(pkt);
221 return pkt->l5hdr_off;
222 }
223
net_rx_pkt_get_ip6_info(struct NetRxPkt * pkt)224 eth_ip6_hdr_info *net_rx_pkt_get_ip6_info(struct NetRxPkt *pkt)
225 {
226 return &pkt->ip6hdr_info;
227 }
228
net_rx_pkt_get_ip4_info(struct NetRxPkt * pkt)229 eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt)
230 {
231 return &pkt->ip4hdr_info;
232 }
233
234 static inline void
_net_rx_rss_add_chunk(uint8_t * rss_input,size_t * bytes_written,void * ptr,size_t size)235 _net_rx_rss_add_chunk(uint8_t *rss_input, size_t *bytes_written,
236 void *ptr, size_t size)
237 {
238 memcpy(&rss_input[*bytes_written], ptr, size);
239 trace_net_rx_pkt_rss_add_chunk(ptr, size, *bytes_written);
240 *bytes_written += size;
241 }
242
243 static inline void
_net_rx_rss_prepare_ip4(uint8_t * rss_input,struct NetRxPkt * pkt,size_t * bytes_written)244 _net_rx_rss_prepare_ip4(uint8_t *rss_input,
245 struct NetRxPkt *pkt,
246 size_t *bytes_written)
247 {
248 struct ip_header *ip4_hdr = &pkt->ip4hdr_info.ip4_hdr;
249
250 _net_rx_rss_add_chunk(rss_input, bytes_written,
251 &ip4_hdr->ip_src, sizeof(uint32_t));
252
253 _net_rx_rss_add_chunk(rss_input, bytes_written,
254 &ip4_hdr->ip_dst, sizeof(uint32_t));
255 }
256
257 static inline void
_net_rx_rss_prepare_ip6(uint8_t * rss_input,struct NetRxPkt * pkt,bool ipv6ex,size_t * bytes_written)258 _net_rx_rss_prepare_ip6(uint8_t *rss_input,
259 struct NetRxPkt *pkt,
260 bool ipv6ex, size_t *bytes_written)
261 {
262 eth_ip6_hdr_info *ip6info = &pkt->ip6hdr_info;
263
264 _net_rx_rss_add_chunk(rss_input, bytes_written,
265 (ipv6ex && ip6info->rss_ex_src_valid) ? &ip6info->rss_ex_src
266 : &ip6info->ip6_hdr.ip6_src,
267 sizeof(struct in6_address));
268
269 _net_rx_rss_add_chunk(rss_input, bytes_written,
270 (ipv6ex && ip6info->rss_ex_dst_valid) ? &ip6info->rss_ex_dst
271 : &ip6info->ip6_hdr.ip6_dst,
272 sizeof(struct in6_address));
273 }
274
275 static inline void
_net_rx_rss_prepare_tcp(uint8_t * rss_input,struct NetRxPkt * pkt,size_t * bytes_written)276 _net_rx_rss_prepare_tcp(uint8_t *rss_input,
277 struct NetRxPkt *pkt,
278 size_t *bytes_written)
279 {
280 struct tcp_header *tcphdr = &pkt->l4hdr_info.hdr.tcp;
281
282 _net_rx_rss_add_chunk(rss_input, bytes_written,
283 &tcphdr->th_sport, sizeof(uint16_t));
284
285 _net_rx_rss_add_chunk(rss_input, bytes_written,
286 &tcphdr->th_dport, sizeof(uint16_t));
287 }
288
289 static inline void
_net_rx_rss_prepare_udp(uint8_t * rss_input,struct NetRxPkt * pkt,size_t * bytes_written)290 _net_rx_rss_prepare_udp(uint8_t *rss_input,
291 struct NetRxPkt *pkt,
292 size_t *bytes_written)
293 {
294 struct udp_header *udphdr = &pkt->l4hdr_info.hdr.udp;
295
296 _net_rx_rss_add_chunk(rss_input, bytes_written,
297 &udphdr->uh_sport, sizeof(uint16_t));
298
299 _net_rx_rss_add_chunk(rss_input, bytes_written,
300 &udphdr->uh_dport, sizeof(uint16_t));
301 }
302
/*
 * Compute the Toeplitz RSS hash for the packet.
 *
 * The hash input is assembled from the addresses/ports selected by 'type'
 * (IPv4/IPv6 addresses, optionally extension-header addresses for the *Ex
 * variants, plus TCP/UDP ports for the L4 variants), then folded with the
 * caller-supplied Toeplitz 'key'.  The asserts encode each type's
 * precondition: callers must only request a hash type that matches the
 * parsed packet (e.g. NetPktRssIpV4Tcp requires hasip4 + TCP).
 *
 * rss_input is sized for the largest case: two IPv6 addresses (32 bytes)
 * plus two 16-bit ports = 36 bytes.
 */
uint32_t
net_rx_pkt_calc_rss_hash(struct NetRxPkt *pkt,
                         NetRxPktRssType type,
                         uint8_t *key)
{
    uint8_t rss_input[36];
    size_t rss_length = 0;
    uint32_t rss_hash = 0;
    net_toeplitz_key key_data;

    switch (type) {
    case NetPktRssIpV4:
        assert(pkt->hasip4);
        trace_net_rx_pkt_rss_ip4();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV4Tcp:
        assert(pkt->hasip4);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
        trace_net_rx_pkt_rss_ip4_tcp();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6Tcp:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
        trace_net_rx_pkt_rss_ip6_tcp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6:
        assert(pkt->hasip6);
        trace_net_rx_pkt_rss_ip6();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        break;
    case NetPktRssIpV6Ex:
        assert(pkt->hasip6);
        trace_net_rx_pkt_rss_ip6_ex();
        /* 'true' selects extension-header addresses when present */
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        break;
    case NetPktRssIpV6TcpEx:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
        trace_net_rx_pkt_rss_ip6_ex_tcp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV4Udp:
        assert(pkt->hasip4);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
        trace_net_rx_pkt_rss_ip4_udp();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6Udp:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
        trace_net_rx_pkt_rss_ip6_udp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6UdpEx:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
        trace_net_rx_pkt_rss_ip6_ex_udp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    default:
        g_assert_not_reached();
    }

    net_toeplitz_key_init(&key_data, key);
    net_toeplitz_add(&rss_hash, rss_input, rss_length, &key_data);

    trace_net_rx_pkt_rss_hash(rss_length, rss_hash);

    return rss_hash;
}
382
net_rx_pkt_get_ip_id(struct NetRxPkt * pkt)383 uint16_t net_rx_pkt_get_ip_id(struct NetRxPkt *pkt)
384 {
385 assert(pkt);
386
387 if (pkt->hasip4) {
388 return be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_id);
389 }
390
391 return 0;
392 }
393
net_rx_pkt_is_tcp_ack(struct NetRxPkt * pkt)394 bool net_rx_pkt_is_tcp_ack(struct NetRxPkt *pkt)
395 {
396 assert(pkt);
397
398 if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP) {
399 return TCP_HEADER_FLAGS(&pkt->l4hdr_info.hdr.tcp) & TCP_FLAG_ACK;
400 }
401
402 return false;
403 }
404
net_rx_pkt_has_tcp_data(struct NetRxPkt * pkt)405 bool net_rx_pkt_has_tcp_data(struct NetRxPkt *pkt)
406 {
407 assert(pkt);
408
409 if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP) {
410 return pkt->l4hdr_info.has_tcp_data;
411 }
412
413 return false;
414 }
415
net_rx_pkt_get_iovec(struct NetRxPkt * pkt)416 struct iovec *net_rx_pkt_get_iovec(struct NetRxPkt *pkt)
417 {
418 assert(pkt);
419
420 return pkt->vec;
421 }
422
net_rx_pkt_set_vhdr(struct NetRxPkt * pkt,struct virtio_net_hdr * vhdr)423 void net_rx_pkt_set_vhdr(struct NetRxPkt *pkt,
424 struct virtio_net_hdr *vhdr)
425 {
426 assert(pkt);
427
428 memcpy(&pkt->virt_hdr, vhdr, sizeof pkt->virt_hdr);
429 }
430
net_rx_pkt_set_vhdr_iovec(struct NetRxPkt * pkt,const struct iovec * iov,int iovcnt)431 void net_rx_pkt_set_vhdr_iovec(struct NetRxPkt *pkt,
432 const struct iovec *iov, int iovcnt)
433 {
434 assert(pkt);
435
436 iov_to_buf(iov, iovcnt, 0, &pkt->virt_hdr, sizeof pkt->virt_hdr);
437 }
438
/* Clear the virtio-net header (memset also zeroes any padding bytes). */
void net_rx_pkt_unset_vhdr(struct NetRxPkt *pkt)
{
    assert(pkt);

    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));
}
445
net_rx_pkt_is_vlan_stripped(struct NetRxPkt * pkt)446 bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt)
447 {
448 assert(pkt);
449
450 return pkt->ehdr_buf_len ? true : false;
451 }
452
net_rx_pkt_get_vlan_tag(struct NetRxPkt * pkt)453 uint16_t net_rx_pkt_get_vlan_tag(struct NetRxPkt *pkt)
454 {
455 assert(pkt);
456
457 return pkt->tci;
458 }
459
net_rx_pkt_validate_l3_csum(struct NetRxPkt * pkt,bool * csum_valid)460 bool net_rx_pkt_validate_l3_csum(struct NetRxPkt *pkt, bool *csum_valid)
461 {
462 uint32_t cntr;
463 uint16_t csum;
464 uint32_t csl;
465
466 trace_net_rx_pkt_l3_csum_validate_entry();
467
468 if (!pkt->hasip4) {
469 trace_net_rx_pkt_l3_csum_validate_not_ip4();
470 return false;
471 }
472
473 csl = pkt->l4hdr_off - pkt->l3hdr_off;
474
475 cntr = net_checksum_add_iov(pkt->vec, pkt->vec_len,
476 pkt->l3hdr_off,
477 csl, 0);
478
479 csum = net_checksum_finish(cntr);
480
481 *csum_valid = (csum == 0);
482
483 trace_net_rx_pkt_l3_csum_validate_csum(pkt->l3hdr_off, csl,
484 cntr, csum, *csum_valid);
485
486 return true;
487 }
488
489 static uint16_t
_net_rx_pkt_calc_l4_csum(struct NetRxPkt * pkt)490 _net_rx_pkt_calc_l4_csum(struct NetRxPkt *pkt)
491 {
492 uint32_t cntr;
493 uint16_t csum;
494 uint16_t csl;
495 uint32_t cso;
496
497 trace_net_rx_pkt_l4_csum_calc_entry();
498
499 if (pkt->hasip4) {
500 if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP) {
501 csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen);
502 trace_net_rx_pkt_l4_csum_calc_ip4_udp();
503 } else {
504 csl = be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_len) -
505 IP_HDR_GET_LEN(&pkt->ip4hdr_info.ip4_hdr);
506 trace_net_rx_pkt_l4_csum_calc_ip4_tcp();
507 }
508
509 cntr = eth_calc_ip4_pseudo_hdr_csum(&pkt->ip4hdr_info.ip4_hdr,
510 csl, &cso);
511 trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl);
512 } else {
513 if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP) {
514 csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen);
515 trace_net_rx_pkt_l4_csum_calc_ip6_udp();
516 } else {
517 struct ip6_header *ip6hdr = &pkt->ip6hdr_info.ip6_hdr;
518 size_t full_ip6hdr_len = pkt->l4hdr_off - pkt->l3hdr_off;
519 size_t ip6opts_len = full_ip6hdr_len - sizeof(struct ip6_header);
520
521 csl = be16_to_cpu(ip6hdr->ip6_ctlun.ip6_un1.ip6_un1_plen) -
522 ip6opts_len;
523 trace_net_rx_pkt_l4_csum_calc_ip6_tcp();
524 }
525
526 cntr = eth_calc_ip6_pseudo_hdr_csum(&pkt->ip6hdr_info.ip6_hdr, csl,
527 pkt->ip6hdr_info.l4proto, &cso);
528 trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl);
529 }
530
531 cntr += net_checksum_add_iov(pkt->vec, pkt->vec_len,
532 pkt->l4hdr_off, csl, cso);
533
534 csum = net_checksum_finish_nozero(cntr);
535
536 trace_net_rx_pkt_l4_csum_calc_csum(pkt->l4hdr_off, csl, cntr, csum);
537
538 return csum;
539 }
540
/*
 * Validate the SCTP CRC32c checksum.
 *
 * The checksum field (offset 8 into the SCTP common header) is temporarily
 * overwritten with zero, the CRC32c is computed over the SCTP portion of
 * the packet, and the original field is restored before returning.
 * Returns false if the checksum field cannot be reached in the iovec.
 */
static bool
_net_rx_pkt_validate_sctp_sum(struct NetRxPkt *pkt)
{
    size_t csum_off;
    size_t off = pkt->l4hdr_off;
    size_t vec_len = pkt->vec_len;
    struct iovec *vec;
    uint32_t calculated = 0;
    uint32_t original;
    bool valid;

    /* Skip fully-consumed iovec entries so 'off' lands inside 'vec'. */
    for (vec = pkt->vec; vec->iov_len < off; vec++) {
        off -= vec->iov_len;
        vec_len--;
    }

    csum_off = off + 8;

    if (!iov_to_buf(vec, vec_len, csum_off, &original, sizeof(original))) {
        return false;
    }

    /* 'calculated' is still 0 here: this zeroes the on-wire checksum field. */
    if (!iov_from_buf(vec, vec_len, csum_off,
                      &calculated, sizeof(calculated))) {
        return false;
    }

    /* CRC over the first (partial) element, then the remaining elements. */
    calculated = crc32c(0xffffffff,
                        (uint8_t *)vec->iov_base + off, vec->iov_len - off);
    calculated = iov_crc32c(calculated ^ 0xffffffff, vec + 1, vec_len - 1);
    valid = calculated == le32_to_cpu(original);
    /* Restore the original checksum field. */
    iov_from_buf(vec, vec_len, csum_off, &original, sizeof(original));

    return valid;
}
576
/*
 * Validate the L4 checksum of the packet.
 *
 * Returns false when there is nothing to validate (IPv4 fragment, UDP with
 * checksum disabled, or an unsupported L4 protocol); otherwise returns true
 * and stores the verdict in *csum_valid.
 */
bool net_rx_pkt_validate_l4_csum(struct NetRxPkt *pkt, bool *csum_valid)
{
    uint32_t csum;

    trace_net_rx_pkt_l4_csum_validate_entry();

    /* Fragments lack the complete L4 payload; cannot validate. */
    if (pkt->hasip4 && pkt->ip4hdr_info.fragment) {
        trace_net_rx_pkt_l4_csum_validate_ip4_fragment();
        return false;
    }

    switch (pkt->l4hdr_info.proto) {
    case ETH_L4_HDR_PROTO_UDP:
        /* UDP checksum 0 means "checksum not used" - nothing to validate */
        if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) {
            trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum();
            return false;
        }
        /* fall through */
    case ETH_L4_HDR_PROTO_TCP:
        csum = _net_rx_pkt_calc_l4_csum(pkt);
        /* 0xFFFF also counts: calc uses the nozero finisher */
        *csum_valid = ((csum == 0) || (csum == 0xFFFF));
        break;

    case ETH_L4_HDR_PROTO_SCTP:
        *csum_valid = _net_rx_pkt_validate_sctp_sum(pkt);
        break;

    default:
        trace_net_rx_pkt_l4_csum_validate_not_xxp();
        return false;
    }

    trace_net_rx_pkt_l4_csum_validate_csum(*csum_valid);

    return true;
}
613
/*
 * Recompute and patch the L4 (TCP/UDP) checksum in place.
 *
 * Returns false when the checksum cannot be fixed (unsupported protocol,
 * UDP with checksum disabled, or an IPv4 fragment); otherwise writes the
 * freshly computed checksum into the packet and returns true.
 */
bool net_rx_pkt_fix_l4_csum(struct NetRxPkt *pkt)
{
    uint16_t csum = 0;
    uint32_t l4_cso;

    trace_net_rx_pkt_l4_csum_fix_entry();

    /* Locate the checksum field within the L4 header. */
    switch (pkt->l4hdr_info.proto) {
    case ETH_L4_HDR_PROTO_TCP:
        l4_cso = offsetof(struct tcp_header, th_sum);
        trace_net_rx_pkt_l4_csum_fix_tcp(l4_cso);
        break;

    case ETH_L4_HDR_PROTO_UDP:
        /* Preserve "checksum not used" semantics - leave 0 alone */
        if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) {
            trace_net_rx_pkt_l4_csum_fix_udp_with_no_checksum();
            return false;
        }
        l4_cso = offsetof(struct udp_header, uh_sum);
        trace_net_rx_pkt_l4_csum_fix_udp(l4_cso);
        break;

    default:
        trace_net_rx_pkt_l4_csum_fix_not_xxp();
        return false;
    }

    /* Fragments lack the complete L4 payload; cannot recompute. */
    if (pkt->hasip4 && pkt->ip4hdr_info.fragment) {
        trace_net_rx_pkt_l4_csum_fix_ip4_fragment();
        return false;
    }

    /* Set zero to checksum word so it does not bias the computation */
    iov_from_buf(pkt->vec, pkt->vec_len,
                 pkt->l4hdr_off + l4_cso,
                 &csum, sizeof(csum));

    /* Calculate L4 checksum */
    csum = cpu_to_be16(_net_rx_pkt_calc_l4_csum(pkt));

    /* Set calculated checksum to checksum word */
    iov_from_buf(pkt->vec, pkt->vec_len,
                 pkt->l4hdr_off + l4_cso,
                 &csum, sizeof(csum));

    trace_net_rx_pkt_l4_csum_fix_csum(pkt->l4hdr_off + l4_cso, csum);

    return true;
}
663