/*
 * QEMU network structures definitions and helper functions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "qemu-common.h"
#include "net/tap.h"

void eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag,
    bool *is_new)
{
    struct vlan_header *vhdr = PKT_GET_VLAN_HDR(ehdr);

    switch (be16_to_cpu(ehdr->h_proto)) {
    case ETH_P_VLAN:
    case ETH_P_DVLAN:
        /* vlan hdr exists */
        *is_new = false;
        break;

    default:
        /* No VLAN header, put a new one */
        vhdr->h_proto = ehdr->h_proto;
        ehdr->h_proto = cpu_to_be16(ETH_P_VLAN);
        *is_new = true;
        break;
    }
    vhdr->h_tci = cpu_to_be16(vlan_tag);
}

uint8_t
eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto)
{
    uint8_t ecn_state = 0;

    if (l3_proto == ETH_P_IP) {
        struct ip_header *iphdr = (struct ip_header *) l3_hdr;

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (IPTOS_ECN(iphdr->ip_tos) == IPTOS_ECN_CE) {
                ecn_state = VIRTIO_NET_HDR_GSO_ECN;
            }
            if (l4proto == IP_PROTO_TCP) {
                return VIRTIO_NET_HDR_GSO_TCPV4 | ecn_state;
            } else if (l4proto == IP_PROTO_UDP) {
                return VIRTIO_NET_HDR_GSO_UDP | ecn_state;
            }
        }
    } else if (l3_proto == ETH_P_IPV6) {
        struct ip6_header *ip6hdr = (struct ip6_header *) l3_hdr;

        if (IP6_ECN(ip6hdr->ip6_ecn_acc) == IP6_ECN_CE) {
            ecn_state = VIRTIO_NET_HDR_GSO_ECN;
        }

        if (l4proto == IP_PROTO_TCP) {
            return VIRTIO_NET_HDR_GSO_TCPV6 | ecn_state;
        }
    }

    /* Unsupported offload */
    g_assert_not_reached();

    return VIRTIO_NET_HDR_GSO_NONE | ecn_state;
}

void eth_get_protocols(const uint8_t *headers,
                       uint32_t hdr_length,
                       bool *isip4, bool *isip6,
                       bool *isudp, bool *istcp)
{
    int proto;
    size_t l2hdr_len = eth_get_l2_hdr_length(headers);
    assert(hdr_length >= eth_get_l2_hdr_length(headers));
    *isip4 = *isip6 = *isudp = *istcp = false;

    proto = eth_get_l3_proto(headers, l2hdr_len);
    if (proto == ETH_P_IP) {
        *isip4 = true;

        struct ip_header *iphdr;

        assert(hdr_length >=
            eth_get_l2_hdr_length(headers) + sizeof(struct ip_header));

        iphdr = PKT_GET_IP_HDR(headers);

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (iphdr->ip_p == IP_PROTO_TCP) {
                *istcp = true;
            } else if (iphdr->ip_p == IP_PROTO_UDP) {
                *isudp = true;
            }
        }
    } else if (proto == ETH_P_IPV6) {
        uint8_t l4proto;
        size_t full_ip6hdr_len;

        struct iovec hdr_vec;
        hdr_vec.iov_base = (void *) headers;
        hdr_vec.iov_len = hdr_length;

        *isip6 = true;
        if (eth_parse_ipv6_hdr(&hdr_vec, 1, l2hdr_len,
                               &l4proto, &full_ip6hdr_len)) {
            if (l4proto == IP_PROTO_TCP) {
                *istcp = true;
            } else if (l4proto == IP_PROTO_UDP) {
                *isudp = true;
            }
        }
    }
}

void
eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
                            void *l3hdr, size_t l3hdr_len,
                            size_t l3payload_len,
                            size_t frag_offset, bool more_frags)
{
    if (eth_get_l3_proto(l2hdr, l2hdr_len) == ETH_P_IP) {
        uint16_t orig_flags;
        struct ip_header *iphdr = (struct ip_header *) l3hdr;
        uint16_t frag_off_units = frag_offset / IP_FRAG_UNIT_SIZE;
        uint16_t new_ip_off;

        assert(frag_offset % IP_FRAG_UNIT_SIZE == 0);
        assert((frag_off_units & ~IP_OFFMASK) == 0);

        orig_flags = be16_to_cpu(iphdr->ip_off) & ~(IP_OFFMASK|IP_MF);
        new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0);
        iphdr->ip_off = cpu_to_be16(new_ip_off);
        iphdr->ip_len = cpu_to_be16(l3payload_len + l3hdr_len);
    }
}

void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len)
{
    struct ip_header *iphdr = (struct ip_header *) l3hdr;
    iphdr->ip_sum = 0;
    iphdr->ip_sum = cpu_to_be16(net_raw_checksum(l3hdr, l3hdr_len));
}

uint32_t
eth_calc_pseudo_hdr_csum(struct ip_header *iphdr, uint16_t csl)
{
    struct ip_pseudo_header ipph;
    ipph.ip_src = iphdr->ip_src;
    ipph.ip_dst = iphdr->ip_dst;
    ipph.ip_payload = cpu_to_be16(csl);
    ipph.ip_proto = iphdr->ip_p;
    ipph.zeros = 0;
    return net_checksum_add(sizeof(ipph), (uint8_t *) &ipph);
}

static bool
eth_is_ip6_extension_header_type(uint8_t hdr_type)
{
    switch (hdr_type) {
    case IP6_HOP_BY_HOP:
    case IP6_ROUTING:
    case IP6_FRAGMENT:
    case IP6_ESP:
    case IP6_AUTHENTICATION:
    case IP6_DESTINATON:
    case IP6_MOBILITY:
        return true;
    default:
        return false;
    }
}

bool eth_parse_ipv6_hdr(struct iovec *pkt, int pkt_frags,
                        size_t ip6hdr_off, uint8_t *l4proto,
                        size_t *full_hdr_len)
{
    struct ip6_header ip6_hdr;
    struct ip6_ext_hdr ext_hdr;
    size_t bytes_read;

    bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off,
                            &ip6_hdr, sizeof(ip6_hdr));
    if (bytes_read < sizeof(ip6_hdr)) {
        return false;
    }

    *full_hdr_len = sizeof(struct ip6_header);

    if (!eth_is_ip6_extension_header_type(ip6_hdr.ip6_nxt)) {
        *l4proto = ip6_hdr.ip6_nxt;
        return true;
    }

    do {
        bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off + *full_hdr_len,
                                &ext_hdr, sizeof(ext_hdr));
        if (bytes_read < sizeof(ext_hdr)) {
            /* Truncated packet: bail out instead of walking stale data */
            return false;
        }
        *full_hdr_len += (ext_hdr.ip6r_len + 1) * IP6_EXT_GRANULARITY;
    } while (eth_is_ip6_extension_header_type(ext_hdr.ip6r_nxt));

    *l4proto = ext_hdr.ip6r_nxt;
    return true;
}
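
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * (for example an emulated NIC performing IPv4 checksum offload) could
 * combine the helpers above roughly as follows. The function name, "frame"
 * and "frame_len" are made up for illustration; real callers in hw/net/
 * keep additional per-packet state around these calls.
 */
#if 0
static void example_classify_and_fix_csum(uint8_t *frame, uint32_t frame_len)
{
    bool isip4, isip6, isudp, istcp;

    /* Detect the L3/L4 protocols carried by the frame */
    eth_get_protocols(frame, frame_len, &isip4, &isip6, &isudp, &istcp);

    if (isip4) {
        struct ip_header *iphdr = PKT_GET_IP_HDR(frame);

        /* Recompute the IPv4 header checksum in place */
        eth_fix_ip4_checksum(iphdr, IP_HDR_GET_LEN(iphdr));
    }
}
#endif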