/*
 * QEMU network structures definitions and helper functions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "net/eth.h"
#include "net/checksum.h"
#include "qemu-common.h"
#include "net/tap.h"

void eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag,
    bool *is_new)
{
    struct vlan_header *vhdr = PKT_GET_VLAN_HDR(ehdr);

    switch (be16_to_cpu(ehdr->h_proto)) {
    case ETH_P_VLAN:
    case ETH_P_DVLAN:
        /* VLAN header already present */
        *is_new = false;
        break;

    default:
        /* No VLAN header, insert a new one */
        vhdr->h_proto = ehdr->h_proto;
        ehdr->h_proto = cpu_to_be16(ETH_P_VLAN);
        *is_new = true;
        break;
    }
    vhdr->h_tci = cpu_to_be16(vlan_tag);
}

uint8_t
eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto)
{
    uint8_t ecn_state = 0;

    if (l3_proto == ETH_P_IP) {
        struct ip_header *iphdr = (struct ip_header *) l3_hdr;

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (IPTOS_ECN(iphdr->ip_tos) == IPTOS_ECN_CE) {
                ecn_state = VIRTIO_NET_HDR_GSO_ECN;
            }
            if (l4proto == IP_PROTO_TCP) {
                return VIRTIO_NET_HDR_GSO_TCPV4 | ecn_state;
            } else if (l4proto == IP_PROTO_UDP) {
                return VIRTIO_NET_HDR_GSO_UDP | ecn_state;
            }
        }
    } else if (l3_proto == ETH_P_IPV6) {
        struct ip6_header *ip6hdr = (struct ip6_header *) l3_hdr;

        if (IP6_ECN(ip6hdr->ip6_ecn_acc) == IP6_ECN_CE) {
            ecn_state = VIRTIO_NET_HDR_GSO_ECN;
        }

        if (l4proto == IP_PROTO_TCP) {
            return VIRTIO_NET_HDR_GSO_TCPV6 | ecn_state;
        }
    }

    /* Unsupported offload */
    assert(false);

    return VIRTIO_NET_HDR_GSO_NONE | ecn_state;
}

void eth_get_protocols(const uint8_t *headers,
                       uint32_t hdr_length,
                       bool *isip4, bool *isip6,
                       bool *isudp, bool *istcp)
{
    int proto;
    size_t l2hdr_len = eth_get_l2_hdr_length(headers);
    assert(hdr_length >= eth_get_l2_hdr_length(headers));
    *isip4 = *isip6 = *isudp = *istcp = false;

    proto = eth_get_l3_proto(headers, l2hdr_len);
    if (proto == ETH_P_IP) {
        *isip4 = true;

        struct ip_header *iphdr;

        assert(hdr_length >=
               eth_get_l2_hdr_length(headers) + sizeof(struct ip_header));

        iphdr = PKT_GET_IP_HDR(headers);

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (iphdr->ip_p == IP_PROTO_TCP) {
                *istcp = true;
            } else if (iphdr->ip_p == IP_PROTO_UDP) {
                *isudp = true;
            }
        }
    } else if (proto == ETH_P_IPV6) {
        uint8_t l4proto;
        size_t full_ip6hdr_len;

        struct iovec hdr_vec;
        hdr_vec.iov_base = (void *) headers;
        hdr_vec.iov_len = hdr_length;

        *isip6 = true;
        if (eth_parse_ipv6_hdr(&hdr_vec, 1, l2hdr_len,
                               &l4proto, &full_ip6hdr_len)) {
            if (l4proto == IP_PROTO_TCP) {
                *istcp = true;
            } else if (l4proto == IP_PROTO_UDP) {
                *isudp = true;
            }
        }
    }
}

void
eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
                            void *l3hdr, size_t l3hdr_len,
                            size_t l3payload_len,
                            size_t frag_offset, bool more_frags)
{
    if (eth_get_l3_proto(l2hdr, l2hdr_len) == ETH_P_IP) {
        uint16_t orig_flags;
        struct ip_header *iphdr = (struct ip_header *) l3hdr;
        uint16_t frag_off_units = frag_offset / IP_FRAG_UNIT_SIZE;
        uint16_t new_ip_off;

        assert(frag_offset % IP_FRAG_UNIT_SIZE == 0);
        assert((frag_off_units & ~IP_OFFMASK) == 0);

        orig_flags = be16_to_cpu(iphdr->ip_off) & ~(IP_OFFMASK | IP_MF);
        new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0);
        iphdr->ip_off = cpu_to_be16(new_ip_off);
        iphdr->ip_len = cpu_to_be16(l3payload_len + l3hdr_len);
    }
}

void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len)
{
    struct ip_header *iphdr = (struct ip_header *) l3hdr;
    iphdr->ip_sum = 0;
    iphdr->ip_sum = cpu_to_be16(net_raw_checksum(l3hdr, l3hdr_len));
}

uint32_t
eth_calc_pseudo_hdr_csum(struct ip_header *iphdr, uint16_t csl)
{
    struct ip_pseudo_header ipph;
    ipph.ip_src = iphdr->ip_src;
    ipph.ip_dst = iphdr->ip_dst;
    ipph.ip_payload = cpu_to_be16(csl);
    ipph.ip_proto = iphdr->ip_p;
    ipph.zeros = 0;
    return net_checksum_add(sizeof(ipph), (uint8_t *) &ipph);
}

static bool
eth_is_ip6_extension_header_type(uint8_t hdr_type)
{
    switch (hdr_type) {
    case IP6_HOP_BY_HOP:
    case IP6_ROUTING:
    case IP6_FRAGMENT:
    case IP6_ESP:
    case IP6_AUTHENTICATION:
    case IP6_DESTINATON:
    case IP6_MOBILITY:
        return true;
    default:
        return false;
    }
}

bool eth_parse_ipv6_hdr(struct iovec *pkt, int pkt_frags,
                        size_t ip6hdr_off, uint8_t *l4proto,
                        size_t *full_hdr_len)
{
    struct ip6_header ip6_hdr;
    struct ip6_ext_hdr ext_hdr;
    size_t bytes_read;

    bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off,
                            &ip6_hdr, sizeof(ip6_hdr));
    if (bytes_read < sizeof(ip6_hdr)) {
        return false;
    }

    *full_hdr_len = sizeof(struct ip6_header);

    if (!eth_is_ip6_extension_header_type(ip6_hdr.ip6_nxt)) {
        *l4proto = ip6_hdr.ip6_nxt;
        return true;
    }

    /* Walk the extension header chain until a non-extension type is found */
    do {
        bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off + *full_hdr_len,
                                &ext_hdr, sizeof(ext_hdr));
        if (bytes_read < sizeof(ext_hdr)) {
            /* Truncated extension header chain */
            return false;
        }
        *full_hdr_len += (ext_hdr.ip6r_len + 1) * IP6_EXT_GRANULARITY;
    } while (eth_is_ip6_extension_header_type(ext_hdr.ip6r_nxt));

    *l4proto = ext_hdr.ip6r_nxt;
    return true;
}