/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/geneve.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/mpls.h>
#include <net/vxlan.h>
#include <net/erspan.h>

#include "flow_netlink.h"

struct ovs_len_tbl {
	int len;
	const struct ovs_len_tbl *next;
};

#define OVS_ATTR_NESTED -1
#define OVS_ATTR_VARIABLE -2

static bool actions_may_change_flow(const struct nlattr *actions)
{
	struct nlattr *nla;
	int rem;

	nla_for_each_nested(nla, actions, rem) {
		u16 action = nla_type(nla);

		switch (action) {
		case OVS_ACTION_ATTR_OUTPUT:
		case OVS_ACTION_ATTR_RECIRC:
		case OVS_ACTION_ATTR_TRUNC:
		case OVS_ACTION_ATTR_USERSPACE:
			break;

		case OVS_ACTION_ATTR_CT:
		case OVS_ACTION_ATTR_CT_CLEAR:
		case OVS_ACTION_ATTR_HASH:
		case OVS_ACTION_ATTR_POP_ETH:
		case OVS_ACTION_ATTR_POP_MPLS:
		case OVS_ACTION_ATTR_POP_VLAN:
		case OVS_ACTION_ATTR_PUSH_ETH:
		case OVS_ACTION_ATTR_PUSH_MPLS:
		case OVS_ACTION_ATTR_PUSH_VLAN:
		case OVS_ACTION_ATTR_SAMPLE:
		case OVS_ACTION_ATTR_SET:
		case OVS_ACTION_ATTR_SET_MASKED:
		default:
			return true;
		}
	}
	return false;
}

static void update_range(struct sw_flow_match *match,
			 size_t offset, size_t size, bool is_mask)
{
	struct sw_flow_key_range *range;
	size_t start = rounddown(offset, sizeof(long));
	size_t end = roundup(offset + size, sizeof(long));

	if (!is_mask)
		range = &match->range;
	else
		range = &match->mask->range;

	if (range->start == range->end) {
		range->start = start;
		range->end = end;
		return;
	}

	if (range->start > start)
		range->start = start;

	if (range->end < end)
		range->end = end;
}

#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
	do { \
		update_range(match, offsetof(struct sw_flow_key, field),    \
			     sizeof((match)->key->field), is_mask);	    \
		if (is_mask)						    \
			(match)->mask->key.field = value;		    \
		else							    \
			(match)->key->field = value;			    \
	} while (0)
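
/* Illustrative note (editorial sketch, not part of the original build):
 * with a 64-bit long, a put such as
 *
 *	SW_FLOW_KEY_PUT(match, ip.proto, IPPROTO_TCP, false);
 *
 * expands to update_range(match, offsetof(struct sw_flow_key, ip.proto),
 * 1, false), which widens match->range to the enclosing long-aligned
 * window around that byte. Repeated puts only ever grow the
 * [start, end) window, so flow-table hashing and comparison can be
 * restricted to the touched portion of the key.
 */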

#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask)    \
	do {								    \
		update_range(match, offset, len, is_mask);		    \
		if (is_mask)						    \
			memcpy((u8 *)&(match)->mask->key + offset, value_p, \
			       len);					    \
		else							    \
			memcpy((u8 *)(match)->key + offset, value_p, len);  \
	} while (0)

#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask)	       \
	SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
				  value_p, len, is_mask)

#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask)		    \
	do {								    \
		update_range(match, offsetof(struct sw_flow_key, field),    \
			     sizeof((match)->key->field), is_mask);	    \
		if (is_mask)						    \
			memset((u8 *)&(match)->mask->key.field, value,	    \
			       sizeof((match)->mask->key.field));	    \
		else							    \
			memset((u8 *)&(match)->key->field, value,	    \
			       sizeof((match)->key->field));		    \
	} while (0)

static bool match_validate(const struct sw_flow_match *match,
			   u64 key_attrs, u64 mask_attrs, bool log)
{
	u64 key_expected = 0;
	u64 mask_allowed = key_attrs;  /* At most allow all key attributes */

	/* The following mask attributes are allowed only if they
	 * pass the validation tests.
	 */
	mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
			| (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)
			| (1 << OVS_KEY_ATTR_IPV6)
			| (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)
			| (1 << OVS_KEY_ATTR_TCP)
			| (1 << OVS_KEY_ATTR_TCP_FLAGS)
			| (1 << OVS_KEY_ATTR_UDP)
			| (1 << OVS_KEY_ATTR_SCTP)
			| (1 << OVS_KEY_ATTR_ICMP)
			| (1 << OVS_KEY_ATTR_ICMPV6)
			| (1 << OVS_KEY_ATTR_ARP)
			| (1 << OVS_KEY_ATTR_ND)
			| (1 << OVS_KEY_ATTR_MPLS));

	/* Always allowed mask fields. */
	mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
		       | (1 << OVS_KEY_ATTR_IN_PORT)
		       | (1 << OVS_KEY_ATTR_ETHERTYPE));

	/* Check key attributes. */
	if (match->key->eth.type == htons(ETH_P_ARP)
			|| match->key->eth.type == htons(ETH_P_RARP)) {
		key_expected |= 1 << OVS_KEY_ATTR_ARP;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
	}

	if (eth_p_mpls(match->key->eth.type)) {
		key_expected |= 1 << OVS_KEY_ATTR_MPLS;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_MPLS;
	}

	if (match->key->eth.type == htons(ETH_P_IP)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV4;
		if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
			mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
		}

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMP) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
			}
		}
	}

	if (match->key->eth.type == htons(ETH_P_IPV6)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV6;
		if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
			mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
		}

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMPV6) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;

				if (match->key->tp.src ==
						htons(NDISC_NEIGHBOUR_SOLICITATION) ||
				    match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
					key_expected |= 1 << OVS_KEY_ATTR_ND;
					/* Original direction conntrack tuple
					 * uses the same space as the ND fields
					 * in the key, so both are not allowed
					 * at the same time.
					 */
					mask_allowed &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
					if (match->mask && (match->mask->key.tp.src == htons(0xff)))
						mask_allowed |= 1 << OVS_KEY_ATTR_ND;
				}
			}
		}
	}

	if ((key_attrs & key_expected) != key_expected) {
		/* Key attributes check failed. */
		OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)",
			  (unsigned long long)key_attrs,
			  (unsigned long long)key_expected);
		return false;
	}

	if ((mask_attrs & mask_allowed) != mask_attrs) {
		/* Mask attributes check failed. */
		OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)",
			  (unsigned long long)mask_attrs,
			  (unsigned long long)mask_allowed);
		return false;
	}

	return true;
}

size_t ovs_tun_key_attr_size(void)
{
	/* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	return    nla_total_size_64bit(8) /* OVS_TUNNEL_KEY_ATTR_ID */
		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TOS */
		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TTL */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_CSUM */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_OAM */
		+ nla_total_size(256)  /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
		/* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS is mutually exclusive with
		 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
		 */
		+ nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
		+ nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_DST */
		+ nla_total_size(4);   /* OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS */
}
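
/* Editorial note on the accounting above (an assumption about generic
 * netlink sizing, not OVS-specific): nla_total_size(payload) is
 * NLA_ALIGN(NLA_HDRLEN + payload), so with the usual 4-byte header and
 * 4-byte alignment, nla_total_size(1) = NLA_ALIGN(5) = 8; the one-octet
 * TOS and TTL attributes therefore reserve 8 bytes each, and zero-length
 * flag attributes (e.g. CSUM, OAM) still reserve 4 bytes for the header.
 */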

size_t ovs_key_attr_size(void)
{
	/* Whenever adding new OVS_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 28);

	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
		  + ovs_tun_key_attr_size()
		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_STATE */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
		+ nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABELS */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6 */
		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}

static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
	[OVS_VXLAN_EXT_GBP]	    = { .len = sizeof(u32) },
};

static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
	[OVS_TUNNEL_KEY_ATTR_ID]	    = { .len = sizeof(u64) },
	[OVS_TUNNEL_KEY_ATTR_IPV4_SRC]	    = { .len = sizeof(u32) },
	[OVS_TUNNEL_KEY_ATTR_IPV4_DST]	    = { .len = sizeof(u32) },
	[OVS_TUNNEL_KEY_ATTR_TOS]	    = { .len = 1 },
	[OVS_TUNNEL_KEY_ATTR_TTL]	    = { .len = 1 },
	[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_CSUM]	    = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_TP_SRC]	    = { .len = sizeof(u16) },
	[OVS_TUNNEL_KEY_ATTR_TP_DST]	    = { .len = sizeof(u16) },
	[OVS_TUNNEL_KEY_ATTR_OAM]	    = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS]   = { .len = OVS_ATTR_VARIABLE },
	[OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS]    = { .len = OVS_ATTR_NESTED,
						.next = ovs_vxlan_ext_key_lens },
	[OVS_TUNNEL_KEY_ATTR_IPV6_SRC]	    = { .len = sizeof(struct in6_addr) },
	[OVS_TUNNEL_KEY_ATTR_IPV6_DST]	    = { .len = sizeof(struct in6_addr) },
	[OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS]   = { .len = sizeof(u32) },
};
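
/* Quick reference for the sentinel lengths used in these tables
 * (restating the defines near the top of this file): a .len of
 * OVS_ATTR_NESTED (-1) marks an attribute whose payload is itself a
 * nested attribute stream, validated via the .next table, while
 * OVS_ATTR_VARIABLE (-2) accepts any length. Both sentinels bypass the
 * exact-length comparison in check_attr_len() below.
 */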

/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP]	 = { .len = OVS_ATTR_NESTED },
	[OVS_KEY_ATTR_PRIORITY]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_IN_PORT]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_SKB_MARK]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_ETHERNET]	 = { .len = sizeof(struct ovs_key_ethernet) },
	[OVS_KEY_ATTR_VLAN]	 = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_IPV4]	 = { .len = sizeof(struct ovs_key_ipv4) },
	[OVS_KEY_ATTR_IPV6]	 = { .len = sizeof(struct ovs_key_ipv6) },
	[OVS_KEY_ATTR_TCP]	 = { .len = sizeof(struct ovs_key_tcp) },
	[OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_UDP]	 = { .len = sizeof(struct ovs_key_udp) },
	[OVS_KEY_ATTR_SCTP]	 = { .len = sizeof(struct ovs_key_sctp) },
	[OVS_KEY_ATTR_ICMP]	 = { .len = sizeof(struct ovs_key_icmp) },
	[OVS_KEY_ATTR_ICMPV6]	 = { .len = sizeof(struct ovs_key_icmpv6) },
	[OVS_KEY_ATTR_ARP]	 = { .len = sizeof(struct ovs_key_arp) },
	[OVS_KEY_ATTR_ND]	 = { .len = sizeof(struct ovs_key_nd) },
	[OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_DP_HASH]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_TUNNEL]	 = { .len = OVS_ATTR_NESTED,
				     .next = ovs_tunnel_key_lens, },
	[OVS_KEY_ATTR_MPLS]	 = { .len = sizeof(struct ovs_key_mpls) },
	[OVS_KEY_ATTR_CT_STATE]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_CT_ZONE]	 = { .len = sizeof(u16) },
	[OVS_KEY_ATTR_CT_MARK]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
	[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = {
		.len = sizeof(struct ovs_key_ct_tuple_ipv4) },
	[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = {
		.len = sizeof(struct ovs_key_ct_tuple_ipv6) },
};

static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
{
	return expected_len == attr_len ||
	       expected_len == OVS_ATTR_NESTED ||
	       expected_len == OVS_ATTR_VARIABLE;
}

static bool is_all_zero(const u8 *fp, size_t size)
{
	int i;

	if (!fp)
		return false;

	for (i = 0; i < size; i++)
		if (fp[i])
			return false;

	return true;
}

static int __parse_flow_nlattrs(const struct nlattr *attr,
				const struct nlattr *a[],
				u64 *attrsp, bool log, bool nz)
{
	const struct nlattr *nla;
	u64 attrs;
	int rem;

	attrs = *attrsp;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX) {
			OVS_NLERR(log, "Key type %d is out of range max %d",
				  type, OVS_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (attrs & (1 << type)) {
			OVS_NLERR(log, "Duplicate key (type %d).", type);
			return -EINVAL;
		}

		expected_len = ovs_key_lens[type].len;
		if (!check_attr_len(nla_len(nla), expected_len)) {
			OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
				  type, nla_len(nla), expected_len);
			return -EINVAL;
		}

		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
			attrs |= 1 << type;
			a[type] = nla;
		}
	}
	if (rem) {
		OVS_NLERR(log, "Message has %d unknown bytes.", rem);
		return -EINVAL;
	}

	*attrsp = attrs;
	return 0;
}

static int parse_flow_mask_nlattrs(const struct nlattr *attr,
				   const struct nlattr *a[], u64 *attrsp,
				   bool log)
{
	return __parse_flow_nlattrs(attr, a, attrsp, log, true);
}
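
/* Minimal caller-side sketch (illustrative only; the real callers are
 * ovs_nla_get_match() and the flow key extraction path):
 *
 *	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
 *	u64 attrs = 0;
 *	int err = parse_flow_nlattrs(key_attr, a, &attrs, log);
 *
 *	if (!err && (attrs & (1ULL << OVS_KEY_ATTR_IN_PORT)))
 *		in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
 *
 * Each accepted attribute sets its type bit in 'attrs' and records a
 * pointer in 'a', so later stages test presence with one bit test. The
 * mask variant additionally drops all-zero attributes, since an all-zero
 * mask is equivalent to the attribute being absent.
 */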

int parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[],
		       u64 *attrsp, bool log)
{
	return __parse_flow_nlattrs(attr, a, attrsp, log, false);
}

static int genev_tun_opt_from_nlattr(const struct nlattr *a,
				     struct sw_flow_match *match, bool is_mask,
				     bool log)
{
	unsigned long opt_key_offset;

	if (nla_len(a) > sizeof(match->key->tun_opts)) {
		OVS_NLERR(log, "Geneve option length err (len %d, max %zu).",
			  nla_len(a), sizeof(match->key->tun_opts));
		return -EINVAL;
	}

	if (nla_len(a) % 4 != 0) {
		OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.",
			  nla_len(a));
		return -EINVAL;
	}

	/* We need to record the length of the options passed
	 * down, otherwise packets with the same format but
	 * additional options will be silently matched.
	 */
	if (!is_mask) {
		SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
				false);
	} else {
		/* This is somewhat unusual because it looks at
		 * both the key and mask while parsing the
		 * attributes (and by extension assumes the key
		 * is parsed first). Normally, we would verify
		 * that each is the correct length and that the
		 * attributes line up in the validate function.
		 * However, that is difficult because this is
		 * variable length and we won't have the
		 * information later.
		 */
		if (match->key->tun_opts_len != nla_len(a)) {
			OVS_NLERR(log, "Geneve option len %d != mask len %d",
				  match->key->tun_opts_len, nla_len(a));
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
	}

	opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
				  nla_len(a), is_mask);
	return 0;
}

static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
				     struct sw_flow_match *match, bool is_mask,
				     bool log)
{
	struct nlattr *a;
	int rem;
	unsigned long opt_key_offset;
	struct vxlan_metadata opts;

	BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));

	memset(&opts, 0, sizeof(opts));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);

		if (type > OVS_VXLAN_EXT_MAX) {
			OVS_NLERR(log, "VXLAN extension %d out of range max %d",
				  type, OVS_VXLAN_EXT_MAX);
			return -EINVAL;
		}

		if (!check_attr_len(nla_len(a),
				    ovs_vxlan_ext_key_lens[type].len)) {
			OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
				  type, nla_len(a),
				  ovs_vxlan_ext_key_lens[type].len);
			return -EINVAL;
		}

		switch (type) {
		case OVS_VXLAN_EXT_GBP:
			opts.gbp = nla_get_u32(a);
			break;
		default:
			OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
				  type);
			return -EINVAL;
		}
	}
	if (rem) {
		OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
			  rem);
		return -EINVAL;
	}

	if (!is_mask)
		SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
	else
		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);

	opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
				  is_mask);
	return 0;
}
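
/* Editorial note on the option storage convention used above (the
 * authoritative TUN_METADATA_OFFSET() definition lives in flow.h):
 * tunnel options are packed at the tail of key->tun_opts, so an options
 * length of N places the data at sizeof(tun_opts) - N within the array.
 * Matching on tun_opts_len as well as the bytes themselves keeps a
 * packet that carries extra trailing options from silently matching a
 * flow that was installed with fewer options.
 */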

static int erspan_tun_opt_from_nlattr(const struct nlattr *attr,
				      struct sw_flow_match *match, bool is_mask,
				      bool log)
{
	unsigned long opt_key_offset;
	struct erspan_metadata opts;

	BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));

	memset(&opts, 0, sizeof(opts));
	opts.index = nla_get_be32(attr);

	/* Index has only 20 bits */
	if (ntohl(opts.index) & ~INDEX_MASK) {
		OVS_NLERR(log, "ERSPAN index number %x too large.",
			  ntohl(opts.index));
		return -EINVAL;
	}

	SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), is_mask);
	opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
				  is_mask);

	return 0;
}

static int ip_tun_from_nlattr(const struct nlattr *attr,
			      struct sw_flow_match *match, bool is_mask,
			      bool log)
{
	bool ttl = false, ipv4 = false, ipv6 = false;
	__be16 tun_flags = 0;
	int opts_type = 0;
	struct nlattr *a;
	int rem;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int err;

		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
			OVS_NLERR(log, "Tunnel attr %d out of range max %d",
				  type, OVS_TUNNEL_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (!check_attr_len(nla_len(a),
				    ovs_tunnel_key_lens[type].len)) {
			OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
				  type, nla_len(a), ovs_tunnel_key_lens[type].len);
			return -EINVAL;
		}

		switch (type) {
		case OVS_TUNNEL_KEY_ATTR_ID:
			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
					nla_get_be64(a), is_mask);
			tun_flags |= TUNNEL_KEY;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
					nla_get_in_addr(a), is_mask);
			ipv4 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
					nla_get_in_addr(a), is_mask);
			ipv4 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
					nla_get_in6_addr(a), is_mask);
			ipv6 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
					nla_get_in6_addr(a), is_mask);
			ipv6 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_TOS:
			SW_FLOW_KEY_PUT(match, tun_key.tos,
					nla_get_u8(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TTL:
			SW_FLOW_KEY_PUT(match, tun_key.ttl,
					nla_get_u8(a), is_mask);
			ttl = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
			tun_flags |= TUNNEL_DONT_FRAGMENT;
			break;
		case OVS_TUNNEL_KEY_ATTR_CSUM:
			tun_flags |= TUNNEL_CSUM;
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.tp_src,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_DST:
			SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_OAM:
			tun_flags |= TUNNEL_OAM;
			break;
		case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;

			tun_flags |= TUNNEL_GENEVE_OPT;
			opts_type = type;
			break;
		case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;

			tun_flags |= TUNNEL_VXLAN_OPT;
			opts_type = type;
			break;
		case OVS_TUNNEL_KEY_ATTR_PAD:
			break;
		case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = erspan_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;

			tun_flags |= TUNNEL_ERSPAN_OPT;
			opts_type = type;
			break;
		default:
			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
				  type);
			return -EINVAL;
		}
	}

	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
	if (is_mask)
		SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
	else
		SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET,
				false);

	if (rem > 0) {
		OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.",
			  rem);
		return -EINVAL;
	}

	if (ipv4 && ipv6) {
		OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes");
		return -EINVAL;
	}

	if (!is_mask) {
		if (!ipv4 && !ipv6) {
			OVS_NLERR(log, "IP tunnel dst address not specified");
			return -EINVAL;
		}
		if (ipv4 && !match->key->tun_key.u.ipv4.dst) {
			OVS_NLERR(log, "IPv4 tunnel dst address is zero");
			return -EINVAL;
		}
		if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
			OVS_NLERR(log, "IPv6 tunnel dst address is zero");
			return -EINVAL;
		}

		if (!ttl) {
			OVS_NLERR(log, "IP tunnel TTL not specified.");
			return -EINVAL;
		}
	}

	return opts_type;
}

static int vxlan_opt_to_nlattr(struct sk_buff *skb,
			       const void *tun_opts, int swkey_tun_opts_len)
{
	const struct vxlan_metadata *opts = tun_opts;
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
	if (!nla)
		return -EMSGSIZE;

	if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
		return -EMSGSIZE;

	nla_nest_end(skb, nla);
	return 0;
}
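
/* Rough shape of the message built by __ip_tun_to_nlattr() below for an
 * IPv4 Geneve tunnel (derived from the code; attribute order follows the
 * put sequence, zero-valued fields such as tos or tp_src are omitted):
 *
 *	OVS_KEY_ATTR_TUNNEL (nested)
 *	    OVS_TUNNEL_KEY_ATTR_ID            (8 bytes + pad)
 *	    OVS_TUNNEL_KEY_ATTR_IPV4_SRC      (4 bytes)
 *	    OVS_TUNNEL_KEY_ATTR_IPV4_DST      (4 bytes)
 *	    OVS_TUNNEL_KEY_ATTR_TTL           (1 byte)
 *	    OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS   (variable)
 */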

static int __ip_tun_to_nlattr(struct sk_buff *skb,
			      const struct ip_tunnel_key *output,
			      const void *tun_opts, int swkey_tun_opts_len,
			      unsigned short tun_proto)
{
	if (output->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
			 OVS_TUNNEL_KEY_ATTR_PAD))
		return -EMSGSIZE;
	switch (tun_proto) {
	case AF_INET:
		if (output->u.ipv4.src &&
		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
				    output->u.ipv4.src))
			return -EMSGSIZE;
		if (output->u.ipv4.dst &&
		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
				    output->u.ipv4.dst))
			return -EMSGSIZE;
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&output->u.ipv6.src) &&
		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
				     &output->u.ipv6.src))
			return -EMSGSIZE;
		if (!ipv6_addr_any(&output->u.ipv6.dst) &&
		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
				     &output->u.ipv6.dst))
			return -EMSGSIZE;
		break;
	}
	if (output->tos &&
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
		return -EMSGSIZE;
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;
	if (output->tp_src &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
		return -EMSGSIZE;
	if (output->tp_dst &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_OAM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
		return -EMSGSIZE;
	if (swkey_tun_opts_len) {
		if (output->tun_flags & TUNNEL_GENEVE_OPT &&
		    nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
			    swkey_tun_opts_len, tun_opts))
			return -EMSGSIZE;
		else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
			 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
			return -EMSGSIZE;
		else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
			 nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
				      ((struct erspan_metadata *)tun_opts)->index))
			return -EMSGSIZE;
	}

	return 0;
}

static int ip_tun_to_nlattr(struct sk_buff *skb,
			    const struct ip_tunnel_key *output,
			    const void *tun_opts, int swkey_tun_opts_len,
			    unsigned short tun_proto)
{
	struct nlattr *nla;
	int err;

	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
	if (!nla)
		return -EMSGSIZE;

	err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
				 tun_proto);
	if (err)
		return err;

	nla_nest_end(skb, nla);
	return 0;
}

int ovs_nla_put_tunnel_info(struct sk_buff *skb,
			    struct ip_tunnel_info *tun_info)
{
	return __ip_tun_to_nlattr(skb, &tun_info->key,
				  ip_tunnel_info_opts(tun_info),
				  tun_info->options_len,
				  ip_tunnel_info_af(tun_info));
}

static int encode_vlan_from_nlattrs(struct sw_flow_match *match,
				    const struct nlattr *a[],
				    bool is_mask, bool inner)
{
	__be16 tci = 0;
	__be16 tpid = 0;

	if (a[OVS_KEY_ATTR_VLAN])
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

	if (a[OVS_KEY_ATTR_ETHERTYPE])
		tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

	if (likely(!inner)) {
		SW_FLOW_KEY_PUT(match, eth.vlan.tpid, tpid, is_mask);
		SW_FLOW_KEY_PUT(match, eth.vlan.tci, tci, is_mask);
	} else {
		SW_FLOW_KEY_PUT(match, eth.cvlan.tpid, tpid, is_mask);
		SW_FLOW_KEY_PUT(match, eth.cvlan.tci, tci, is_mask);
	}
	return 0;
}

static int validate_vlan_from_nlattrs(const struct sw_flow_match *match,
				      u64 key_attrs, bool inner,
				      const struct nlattr **a, bool log)
{
	__be16 tci = 0;

	if (!((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
	      (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
	      eth_type_vlan(nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE])))) {
		/* Not a VLAN. */
		return 0;
	}

	if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
	      (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
		OVS_NLERR(log, "Invalid %s frame", (inner) ? "C-VLAN" : "VLAN");
		return -EINVAL;
	}

	if (a[OVS_KEY_ATTR_VLAN])
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

	if (!(tci & htons(VLAN_TAG_PRESENT))) {
		if (tci) {
			OVS_NLERR(log, "%s TCI does not have VLAN_TAG_PRESENT bit set.",
				  (inner) ? "C-VLAN" : "VLAN");
			return -EINVAL;
		} else if (nla_len(a[OVS_KEY_ATTR_ENCAP])) {
			/* Corner case for truncated VLAN header. */
			OVS_NLERR(log, "Truncated %s header has non-zero encap attribute.",
				  (inner) ? "C-VLAN" : "VLAN");
			return -EINVAL;
		}
	}

	return 1;
}

static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
					   u64 key_attrs, bool inner,
					   const struct nlattr **a, bool log)
{
	__be16 tci = 0;
	__be16 tpid = 0;
	bool encap_valid = !!(match->key->eth.vlan.tci &
			      htons(VLAN_TAG_PRESENT));
	bool i_encap_valid = !!(match->key->eth.cvlan.tci &
				htons(VLAN_TAG_PRESENT));

	if (!(key_attrs & (1 << OVS_KEY_ATTR_ENCAP))) {
		/* Not a VLAN. */
		return 0;
	}

	if ((!inner && !encap_valid) || (inner && !i_encap_valid)) {
		OVS_NLERR(log, "Encap mask attribute is set for non-%s frame.",
			  (inner) ? "C-VLAN" : "VLAN");
		return -EINVAL;
	}

	if (a[OVS_KEY_ATTR_VLAN])
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

	if (a[OVS_KEY_ATTR_ETHERTYPE])
		tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

	if (tpid != htons(0xffff)) {
		OVS_NLERR(log, "Must have an exact match on %s TPID (mask=%x).",
			  (inner) ? "C-VLAN" : "VLAN", ntohs(tpid));
		return -EINVAL;
	}
	if (!(tci & htons(VLAN_TAG_PRESENT))) {
		OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_TAG_PRESENT bit.",
			  (inner) ? "C-VLAN" : "VLAN");
		return -EINVAL;
	}

	return 1;
}

static int __parse_vlan_from_nlattrs(struct sw_flow_match *match,
				     u64 *key_attrs, bool inner,
				     const struct nlattr **a, bool is_mask,
				     bool log)
{
	int err;
	const struct nlattr *encap;

	if (!is_mask)
		err = validate_vlan_from_nlattrs(match, *key_attrs, inner,
						 a, log);
	else
		err = validate_vlan_mask_from_nlattrs(match, *key_attrs, inner,
						      a, log);
	if (err <= 0)
		return err;

	err = encode_vlan_from_nlattrs(match, a, is_mask, inner);
	if (err)
		return err;

	*key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
	*key_attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
	*key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);

	encap = a[OVS_KEY_ATTR_ENCAP];

	if (!is_mask)
		err = parse_flow_nlattrs(encap, a, key_attrs, log);
	else
		err = parse_flow_mask_nlattrs(encap, a, key_attrs, log);

	return err;
}

static int parse_vlan_from_nlattrs(struct sw_flow_match *match,
				   u64 *key_attrs, const struct nlattr **a,
				   bool is_mask, bool log)
{
	int err;
	bool encap_valid = false;

	err = __parse_vlan_from_nlattrs(match, key_attrs, false, a,
					is_mask, log);
	if (err)
		return err;

	encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_TAG_PRESENT));
	if (encap_valid) {
		err = __parse_vlan_from_nlattrs(match, key_attrs, true, a,
						is_mask, log);
		if (err)
			return err;
	}

	return 0;
}
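
/* Sketch of the nesting the VLAN parsers above accept for a
 * double-tagged (QinQ) flow; the concrete TPID values are examples:
 *
 *	OVS_KEY_ATTR_ETHERTYPE   0x88a8            (outer TPID)
 *	OVS_KEY_ATTR_VLAN        outer TCI | VLAN_TAG_PRESENT
 *	OVS_KEY_ATTR_ENCAP
 *	    OVS_KEY_ATTR_ETHERTYPE   0x8100        (inner TPID)
 *	    OVS_KEY_ATTR_VLAN        inner TCI | VLAN_TAG_PRESENT
 *	    OVS_KEY_ATTR_ENCAP
 *	        ... L3 and higher key attributes ...
 *
 * Each __parse_vlan_from_nlattrs() pass consumes one level and re-runs
 * the attribute parser on the inner OVS_KEY_ATTR_ENCAP payload.
 */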

static int parse_eth_type_from_nlattrs(struct sw_flow_match *match,
				       u64 *attrs, const struct nlattr **a,
				       bool is_mask, bool log)
{
	__be16 eth_type;

	eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
	if (is_mask) {
		/* Always exact match EtherType. */
		eth_type = htons(0xffff);
	} else if (!eth_proto_is_802_3(eth_type)) {
		OVS_NLERR(log, "EtherType %x is less than min %x",
			  ntohs(eth_type), ETH_P_802_3_MIN);
		return -EINVAL;
	}

	SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
	*attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	return 0;
}

static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
				 u64 *attrs, const struct nlattr **a,
				 bool is_mask, bool log)
{
	u8 mac_proto = MAC_PROTO_ETHERNET;

	if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
		u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);

		SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_DP_HASH);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_RECIRC_ID)) {
		u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);

		SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_RECIRC_ID);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		SW_FLOW_KEY_PUT(match, phy.priority,
				nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		if (is_mask) {
			in_port = 0xffffffff; /* Always exact match in_port. */
		} else if (in_port >= DP_MAX_PORTS) {
			OVS_NLERR(log, "Port %d exceeds max allowable %d",
				  in_port, DP_MAX_PORTS);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);

		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
		if (ip_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
				       is_mask, log) < 0)
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
		u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);

		if (ct_state & ~CT_SUPPORTED_MASK) {
			OVS_NLERR(log, "ct_state flags %08x unsupported",
				  ct_state);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ct_state, ct_state, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
		u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);

		SW_FLOW_KEY_PUT(match, ct_zone, ct_zone, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_MARK)) {
		u32 mark = nla_get_u32(a[OVS_KEY_ATTR_CT_MARK]);

		SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
		const struct ovs_key_ct_labels *cl;

		cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
		SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
				   sizeof(*cl), is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
	}
	if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
		const struct ovs_key_ct_tuple_ipv4 *ct;

		ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);

		SW_FLOW_KEY_PUT(match, ipv4.ct_orig.src, ct->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.ct_orig.dst, ct->ipv4_dst, is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv4_proto, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
	}
	if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
		const struct ovs_key_ct_tuple_ipv6 *ct;

		ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);

		SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.src, &ct->ipv6_src,
				   sizeof(match->key->ipv6.ct_orig.src),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.dst, &ct->ipv6_dst,
				   sizeof(match->key->ipv6.ct_orig.dst),
				   is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv6_proto, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
	}

	/* For layer 3 packets the Ethernet type is provided
	 * and treated as metadata but no MAC addresses are provided.
	 */
	if (!(*attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) &&
	    (*attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)))
		mac_proto = MAC_PROTO_NONE;

	/* Always exact match mac_proto */
	SW_FLOW_KEY_PUT(match, mac_proto, is_mask ? 0xff : mac_proto, is_mask);

	if (mac_proto == MAC_PROTO_NONE)
		return parse_eth_type_from_nlattrs(match, attrs, a, is_mask,
						   log);

	return 0;
}
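
/* Editorial summary of the split between the two parsers (restating the
 * code above and below): metadata_from_nlattrs() consumes only the
 * fields that cannot be re-extracted from a packet (in_port, skb_mark,
 * recirc_id, tunnel key, conntrack state, ...), clearing each attribute
 * bit it handles; ovs_key_from_nlattrs() below then insists that every
 * remaining bit corresponds to a packet-derivable field and rejects
 * anything left over.
 */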

static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
				u64 attrs, const struct nlattr **a,
				bool is_mask, bool log)
{
	int err;

	err = metadata_from_nlattrs(net, match, &attrs, a, is_mask, log);
	if (err)
		return err;

	if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
		const struct ovs_key_ethernet *eth_key;

		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
		SW_FLOW_KEY_MEMCPY(match, eth.src,
				   eth_key->eth_src, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, eth.dst,
				   eth_key->eth_dst, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);

		if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
			/* VLAN attribute is always parsed before getting here
			 * since it may occur multiple times.
			 */
			OVS_NLERR(log, "VLAN attribute unexpected.");
			return -EINVAL;
		}

		if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
			err = parse_eth_type_from_nlattrs(match, &attrs, a,
							  is_mask, log);
			if (err)
				return err;
		} else if (!is_mask) {
			SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
		}
	} else if (!match->key->eth.type) {
		OVS_NLERR(log, "Either Ethernet header or EtherType is required.");
		return -EINVAL;
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
		const struct ovs_key_ipv4 *ipv4_key;

		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR(log, "IPv4 frag type %d is out of range max %d",
				  ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv4_key->ipv4_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv4_key->ipv4_tos, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv4_key->ipv4_ttl, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv4_key->ipv4_frag, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				ipv4_key->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				ipv4_key->ipv4_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR(log, "IPv6 frag type %d is out of range max %d",
				  ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}

		if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
			OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x)",
				  ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv6.label,
				ipv6_key->ipv6_label, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv6_key->ipv6_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv6_key->ipv6_tclass, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv6_key->ipv6_hlimit, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv6_key->ipv6_frag, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
				   ipv6_key->ipv6_src,
				   sizeof(match->key->ipv6.addr.src),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
				   ipv6_key->ipv6_dst,
				   sizeof(match->key->ipv6.addr.dst),
				   is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
		const struct ovs_key_arp *arp_key;

		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
			OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).",
				  arp_key->arp_op);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				arp_key->arp_sip, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				arp_key->arp_tip, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ntohs(arp_key->arp_op), is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
				   arp_key->arp_sha, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
				   arp_key->arp_tha, ETH_ALEN, is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_MPLS)) {
		const struct ovs_key_mpls *mpls_key;

		mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
		SW_FLOW_KEY_PUT(match, mpls.top_lse,
				mpls_key->mpls_lse, is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_MPLS);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
		const struct ovs_key_tcp *tcp_key;

		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
		SW_FLOW_KEY_PUT(match, tp.flags,
				nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
				is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
	}

	if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
		const struct ovs_key_udp *udp_key;

		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
		const struct ovs_key_sctp *sctp_key;

		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
		SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
		const struct ovs_key_icmp *icmp_key;

		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmp_key->icmp_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmp_key->icmp_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
		const struct ovs_key_icmpv6 *icmpv6_key;

		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmpv6_key->icmpv6_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmpv6_key->icmpv6_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ND)) {
		const struct ovs_key_nd *nd_key;

		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
				   nd_key->nd_target,
				   sizeof(match->key->ipv6.nd.target),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
				   nd_key->nd_sll, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
				   nd_key->nd_tll, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ND);
	}

	if (attrs != 0) {
		OVS_NLERR(log, "Unknown key attributes %llx",
			  (unsigned long long)attrs);
		return -EINVAL;
	}

	return 0;
}

static void nlattr_set(struct nlattr *attr, u8 val,
		       const struct ovs_len_tbl *tbl)
{
	struct nlattr *nla;
	int rem;

	/* The nlattr stream should already have been validated */
	nla_for_each_nested(nla, attr, rem) {
		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
			if (tbl[nla_type(nla)].next)
				tbl = tbl[nla_type(nla)].next;
			nlattr_set(nla, val, tbl);
		} else {
			memset(nla_data(nla), val, nla_len(nla));
		}

		if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
			*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
	}
}

static void mask_set_nlattr(struct nlattr *attr, u8 val)
{
	nlattr_set(attr, val, ovs_key_lens);
}

/**
 * ovs_nla_get_match - parses Netlink attributes into a flow key and
 * mask. In case the 'mask' is NULL, the flow is treated as an exact
 * match flow. Otherwise, it is treated as a wildcarded flow, except
 * that the mask does not include any don't-care bits.
 * @net: Used to determine per-namespace field support.
 * @match: receives the extracted flow match information.
 * @nla_key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
 * attribute sequence. The fields should be those of the packet that
 * triggered the creation of this flow.
 * @nla_mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_*
 * Netlink attributes that specify the mask field of the wildcarded flow.
 * @log: Boolean to allow kernel error logging. Normally true, but when
 * probing for feature compatibility this should be passed in as false to
 * suppress unnecessary error logging.
 */
int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
		      const struct nlattr *nla_key,
		      const struct nlattr *nla_mask,
		      bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	struct nlattr *newmask = NULL;
	u64 key_attrs = 0;
	u64 mask_attrs = 0;
	int err;

	err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
	if (err)
		return err;

	err = parse_vlan_from_nlattrs(match, &key_attrs, a, false, log);
	if (err)
		return err;

	err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
	if (err)
		return err;

	if (match->mask) {
		if (!nla_mask) {
			/* Create an exact match mask. We need to set to 0xff
			 * all the 'match->mask' fields that have been touched
			 * in 'match->key'. We cannot simply memset
			 * 'match->mask', because padding bytes and fields not
			 * specified in 'match->key' should be left to 0.
			 * Instead, we use a stream of netlink attributes,
			 * copied from 'key' and set to 0xff.
			 * ovs_key_from_nlattrs() will take care of filling
			 * 'match->mask' appropriately.
			 */
			newmask = kmemdup(nla_key,
					  nla_total_size(nla_len(nla_key)),
					  GFP_KERNEL);
			if (!newmask)
				return -ENOMEM;

			mask_set_nlattr(newmask, 0xff);

			/* Userspace does not send tunnel attributes that
			 * are 0, but we should not wildcard them nonetheless.
			 */
			if (match->key->tun_proto)
				SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
							 0xff, true);

			nla_mask = newmask;
		}

		err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
		if (err)
			goto free_newmask;

		/* Always match on tci. */
		SW_FLOW_KEY_PUT(match, eth.vlan.tci, htons(0xffff), true);
		SW_FLOW_KEY_PUT(match, eth.cvlan.tci, htons(0xffff), true);

		err = parse_vlan_from_nlattrs(match, &mask_attrs, a, true, log);
		if (err)
			goto free_newmask;

		err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
					   log);
		if (err)
			goto free_newmask;
	}

	if (!match_validate(match, key_attrs, mask_attrs, log))
		err = -EINVAL;

free_newmask:
	kfree(newmask);
	return err;
}
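
/* Worked example of the exact-match mask construction above (values are
 * illustrative): if userspace sends only
 *
 *	OVS_KEY_ATTR_ETHERTYPE = 0x0800
 *	OVS_KEY_ATTR_IPV4      = { .ipv4_src = 10.0.0.1, ... }
 *
 * and no mask attribute, 'newmask' becomes a byte-for-byte copy of those
 * attributes with every payload byte forced to 0xff (modulo the
 * CT_SUPPORTED_MASK clamp in nlattr_set()). Re-parsing that stream as a
 * mask yields an exact match on precisely the fields userspace gave,
 * while fields it never mentioned stay wildcarded (zero).
 */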

static size_t get_ufid_len(const struct nlattr *attr, bool log)
{
	size_t len;

	if (!attr)
		return 0;

	len = nla_len(attr);
	if (len < 1 || len > MAX_UFID_LENGTH) {
		OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)",
			  nla_len(attr), MAX_UFID_LENGTH);
		return 0;
	}

	return len;
}

/* Initializes 'flow->ufid', returning true if 'attr' contains a valid UFID,
 * or false otherwise.
 */
bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
		      bool log)
{
	sfid->ufid_len = get_ufid_len(attr, log);
	if (sfid->ufid_len)
		memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);

	return sfid->ufid_len;
}

int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
			   const struct sw_flow_key *key, bool log)
{
	struct sw_flow_key *new_key;

	if (ovs_nla_get_ufid(sfid, ufid, log))
		return 0;

	/* If UFID was not provided, use unmasked key. */
	new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
	if (!new_key)
		return -ENOMEM;
	memcpy(new_key, key, sizeof(*key));
	sfid->unmasked_key = new_key;

	return 0;
}

u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
{
	return attr ? nla_get_u32(attr) : 0;
}

/**
 * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
 * @net: Network namespace.
 * @key: Receives extracted in_port, priority, tun_key, skb_mark and conntrack
 * metadata.
 * @a: Array of netlink attributes holding parsed %OVS_KEY_ATTR_* Netlink
 * attributes.
 * @attrs: Bit mask for the netlink attributes included in @a.
 * @log: Boolean to allow kernel error logging. Normally true, but when
 * probing for feature compatibility this should be passed in as false to
 * suppress unnecessary error logging.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 *
 * This must be called before the packet key fields are filled in 'key'.
 */
int ovs_nla_get_flow_metadata(struct net *net,
			      const struct nlattr *a[OVS_KEY_ATTR_MAX + 1],
			      u64 attrs, struct sw_flow_key *key, bool log)
{
	struct sw_flow_match match;

	memset(&match, 0, sizeof(match));
	match.key = key;

	key->ct_state = 0;
	key->ct_zone = 0;
	key->ct_orig_proto = 0;
	memset(&key->ct, 0, sizeof(key->ct));
	memset(&key->ipv4.ct_orig, 0, sizeof(key->ipv4.ct_orig));
	memset(&key->ipv6.ct_orig, 0, sizeof(key->ipv6.ct_orig));

	key->phy.in_port = DP_MAX_PORTS;

	return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
}
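
/* Minimal usage sketch for ovs_nla_get_flow_metadata() (caller-side
 * pseudo-code; an assumption about the in-tree caller in the userspace
 * packet-execute path):
 *
 *	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
 *	u64 attrs = 0;
 *
 *	err = parse_flow_nlattrs(attr, a, &attrs, log);
 *	if (!err)
 *		err = ovs_nla_get_flow_metadata(net, a, attrs, key, log);
 *
 * i.e. the attribute array is parsed once, only the metadata half of the
 * key is filled in, and the packet-derived half is extracted afterwards.
 */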

static int ovs_nla_put_vlan(struct sk_buff *skb, const struct vlan_head *vh,
			    bool is_mask)
{
	__be16 eth_type = !is_mask ? vh->tpid : htons(0xffff);

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
	    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, vh->tci))
		return -EMSGSIZE;
	return 0;
}

static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
			     const struct sw_flow_key *output, bool is_mask,
			     struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla;
	struct nlattr *encap = NULL;
	struct nlattr *in_encap = NULL;

	if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
		goto nla_put_failure;

	if ((swkey->tun_proto || is_mask)) {
		const void *opts = NULL;

		if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
			opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);

		if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
				     swkey->tun_opts_len, swkey->tun_proto))
			goto nla_put_failure;
	}

	if (swkey->phy.in_port == DP_MAX_PORTS) {
		if (is_mask && (output->phy.in_port == 0xffff))
			if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
				goto nla_put_failure;
	} else {
		u16 upper_u16;
		upper_u16 = !is_mask ? 0 : 0xffff;

		if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
				(upper_u16 << 16) | output->phy.in_port))
			goto nla_put_failure;
	}

	if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
		goto nla_put_failure;

	if (ovs_ct_put_key(swkey, output, skb))
		goto nla_put_failure;

	if (ovs_key_mac_proto(swkey) == MAC_PROTO_ETHERNET) {
		nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
		if (!nla)
			goto nla_put_failure;

		eth_key = nla_data(nla);
		ether_addr_copy(eth_key->eth_src, output->eth.src);
		ether_addr_copy(eth_key->eth_dst, output->eth.dst);

		if (swkey->eth.vlan.tci || eth_type_vlan(swkey->eth.type)) {
			if (ovs_nla_put_vlan(skb, &output->eth.vlan, is_mask))
				goto nla_put_failure;
			encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
			if (!swkey->eth.vlan.tci)
				goto unencap;

			if (swkey->eth.cvlan.tci || eth_type_vlan(swkey->eth.type)) {
				if (ovs_nla_put_vlan(skb, &output->eth.cvlan, is_mask))
					goto nla_put_failure;
				in_encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
				if (!swkey->eth.cvlan.tci)
					goto unencap;
			}
		}

		if (swkey->eth.type == htons(ETH_P_802_2)) {
			/*
			 * Ethertype 802.2 is represented in the netlink with omitted
			 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
			 * 0xffff in the mask attribute. Ethertype can also
			 * be wildcarded.
			 */
			if (is_mask && output->eth.type)
				if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
						 output->eth.type))
					goto nla_put_failure;
			goto unencap;
		}
	}

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
		goto nla_put_failure;

	if (eth_type_vlan(swkey->eth.type)) {
		/* There are 3 VLAN tags, we don't know anything about the rest
		 * of the packet, so truncate here.
		 */
		WARN_ON_ONCE(!(encap && in_encap));
		goto unencap;
	}

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = output->ipv4.addr.src;
		ipv4_key->ipv4_dst = output->ipv4.addr.dst;
		ipv4_key->ipv4_proto = output->ip.proto;
		ipv4_key->ipv4_tos = output->ip.tos;
		ipv4_key->ipv4_ttl = output->ip.ttl;
		ipv4_key->ipv4_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
		       sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
		       sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = output->ipv6.label;
		ipv6_key->ipv6_proto = output->ip.proto;
		ipv6_key->ipv6_tclass = output->ip.tos;
		ipv6_key->ipv6_hlimit = output->ip.ttl;
		ipv6_key->ipv6_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
		   swkey->eth.type == htons(ETH_P_RARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = output->ipv4.addr.src;
		arp_key->arp_tip = output->ipv4.addr.dst;
		arp_key->arp_op = htons(output->ip.proto);
		ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
		ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
	} else if (eth_p_mpls(swkey->eth.type)) {
		struct ovs_key_mpls *mpls_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
		if (!nla)
			goto nla_put_failure;
		mpls_key = nla_data(nla);
		mpls_key->mpls_lse = output->mpls.top_lse;
	}

	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			tcp_key->tcp_src = output->tp.src;
			tcp_key->tcp_dst = output->tp.dst;
			if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
					 output->tp.flags))
				goto nla_put_failure;
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			udp_key->udp_src = output->tp.src;
			udp_key->udp_dst = output->tp.dst;
		} else if (swkey->ip.proto == IPPROTO_SCTP) {
			struct ovs_key_sctp *sctp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
			if (!nla)
				goto nla_put_failure;
			sctp_key = nla_data(nla);
			sctp_key->sctp_src = output->tp.src;
			sctp_key->sctp_dst = output->tp.dst;
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
1867 icmp_key->icmp_type = ntohs(output->tp.src); 1868 icmp_key->icmp_code = ntohs(output->tp.dst); 1869 } else if (swkey->eth.type == htons(ETH_P_IPV6) && 1870 swkey->ip.proto == IPPROTO_ICMPV6) { 1871 struct ovs_key_icmpv6 *icmpv6_key; 1872 1873 nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6, 1874 sizeof(*icmpv6_key)); 1875 if (!nla) 1876 goto nla_put_failure; 1877 icmpv6_key = nla_data(nla); 1878 icmpv6_key->icmpv6_type = ntohs(output->tp.src); 1879 icmpv6_key->icmpv6_code = ntohs(output->tp.dst); 1880 1881 if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || 1882 icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { 1883 struct ovs_key_nd *nd_key; 1884 1885 nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key)); 1886 if (!nla) 1887 goto nla_put_failure; 1888 nd_key = nla_data(nla); 1889 memcpy(nd_key->nd_target, &output->ipv6.nd.target, 1890 sizeof(nd_key->nd_target)); 1891 ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll); 1892 ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll); 1893 } 1894 } 1895 } 1896 1897 unencap: 1898 if (in_encap) 1899 nla_nest_end(skb, in_encap); 1900 if (encap) 1901 nla_nest_end(skb, encap); 1902 1903 return 0; 1904 1905 nla_put_failure: 1906 return -EMSGSIZE; 1907 } 1908 1909 int ovs_nla_put_key(const struct sw_flow_key *swkey, 1910 const struct sw_flow_key *output, int attr, bool is_mask, 1911 struct sk_buff *skb) 1912 { 1913 int err; 1914 struct nlattr *nla; 1915 1916 nla = nla_nest_start(skb, attr); 1917 if (!nla) 1918 return -EMSGSIZE; 1919 err = __ovs_nla_put_key(swkey, output, is_mask, skb); 1920 if (err) 1921 return err; 1922 nla_nest_end(skb, nla); 1923 1924 return 0; 1925 } 1926 1927 /* Called with ovs_mutex or RCU read lock. */ 1928 int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb) 1929 { 1930 if (ovs_identifier_is_ufid(&flow->id)) 1931 return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len, 1932 flow->id.ufid); 1933 1934 return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key, 1935 OVS_FLOW_ATTR_KEY, false, skb); 1936 } 1937 1938 /* Called with ovs_mutex or RCU read lock. */ 1939 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb) 1940 { 1941 return ovs_nla_put_key(&flow->key, &flow->key, 1942 OVS_FLOW_ATTR_KEY, false, skb); 1943 } 1944 1945 /* Called with ovs_mutex or RCU read lock. 
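 * Serializes the flow mask as OVS_FLOW_ATTR_MASK: the masked flow key selects which attributes are emitted while the mask key supplies their values.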
*/ 1946 int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb) 1947 { 1948 return ovs_nla_put_key(&flow->key, &flow->mask->key, 1949 OVS_FLOW_ATTR_MASK, true, skb); 1950 } 1951 1952 #define MAX_ACTIONS_BUFSIZE (32 * 1024) 1953 1954 static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log) 1955 { 1956 struct sw_flow_actions *sfa; 1957 1958 if (size > MAX_ACTIONS_BUFSIZE) { 1959 OVS_NLERR(log, "Flow action size %u bytes exceeds max", size); 1960 return ERR_PTR(-EINVAL); 1961 } 1962 1963 sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); 1964 if (!sfa) 1965 return ERR_PTR(-ENOMEM); 1966 1967 sfa->actions_len = 0; 1968 return sfa; 1969 } 1970 1971 static void ovs_nla_free_set_action(const struct nlattr *a) 1972 { 1973 const struct nlattr *ovs_key = nla_data(a); 1974 struct ovs_tunnel_info *ovs_tun; 1975 1976 switch (nla_type(ovs_key)) { 1977 case OVS_KEY_ATTR_TUNNEL_INFO: 1978 ovs_tun = nla_data(ovs_key); 1979 dst_release((struct dst_entry *)ovs_tun->tun_dst); 1980 break; 1981 } 1982 } 1983 1984 void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts) 1985 { 1986 const struct nlattr *a; 1987 int rem; 1988 1989 if (!sf_acts) 1990 return; 1991 1992 nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) { 1993 switch (nla_type(a)) { 1994 case OVS_ACTION_ATTR_SET: 1995 ovs_nla_free_set_action(a); 1996 break; 1997 case OVS_ACTION_ATTR_CT: 1998 ovs_ct_free_action(a); 1999 break; 2000 } 2001 } 2002 2003 kfree(sf_acts); 2004 } 2005 2006 static void __ovs_nla_free_flow_actions(struct rcu_head *head) 2007 { 2008 ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu)); 2009 } 2010 2011 /* Schedules 'sf_acts' to be freed after the next RCU grace period. 2012 * The caller must hold rcu_read_lock for this to be sensible. 
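 * (call_rcu() only queues the free; the caller must already have made 'sf_acts' unreachable to new readers, e.g. by publishing a replacement pointer with rcu_assign_pointer().)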
*/ 2013 void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts) 2014 { 2015 call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions); 2016 } 2017 2018 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, 2019 int attr_len, bool log) 2020 { 2021 2022 struct sw_flow_actions *acts; 2023 int new_acts_size; 2024 int req_size = NLA_ALIGN(attr_len); 2025 int next_offset = offsetof(struct sw_flow_actions, actions) + 2026 (*sfa)->actions_len; 2027 2028 if (req_size <= (ksize(*sfa) - next_offset)) 2029 goto out; 2030 2031 new_acts_size = ksize(*sfa) * 2; 2032 2033 if (new_acts_size > MAX_ACTIONS_BUFSIZE) { 2034 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) 2035 return ERR_PTR(-EMSGSIZE); 2036 new_acts_size = MAX_ACTIONS_BUFSIZE; 2037 } 2038 2039 acts = nla_alloc_flow_actions(new_acts_size, log); 2040 if (IS_ERR(acts)) 2041 return (void *)acts; 2042 2043 memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len); 2044 acts->actions_len = (*sfa)->actions_len; 2045 acts->orig_len = (*sfa)->orig_len; 2046 kfree(*sfa); 2047 *sfa = acts; 2048 2049 out: 2050 (*sfa)->actions_len += req_size; 2051 return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset); 2052 } 2053 2054 static struct nlattr *__add_action(struct sw_flow_actions **sfa, 2055 int attrtype, void *data, int len, bool log) 2056 { 2057 struct nlattr *a; 2058 2059 a = reserve_sfa_size(sfa, nla_attr_size(len), log); 2060 if (IS_ERR(a)) 2061 return a; 2062 2063 a->nla_type = attrtype; 2064 a->nla_len = nla_attr_size(len); 2065 2066 if (data) 2067 memcpy(nla_data(a), data, len); 2068 memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len)); 2069 2070 return a; 2071 } 2072 2073 int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype, void *data, 2074 int len, bool log) 2075 { 2076 struct nlattr *a; 2077 2078 a = __add_action(sfa, attrtype, data, len, log); 2079 2080 return PTR_ERR_OR_ZERO(a); 2081 } 2082 2083 static inline int add_nested_action_start(struct sw_flow_actions **sfa, 2084 int attrtype, bool log) 2085 { 2086 int used = (*sfa)->actions_len; 2087 int err; 2088 2089 err = ovs_nla_add_action(sfa, attrtype, NULL, 0, log); 2090 if (err) 2091 return err; 2092 2093 return used; 2094 } 2095 2096 static inline void add_nested_action_end(struct sw_flow_actions *sfa, 2097 int st_offset) 2098 { 2099 struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + 2100 st_offset); 2101 2102 a->nla_len = sfa->actions_len - st_offset; 2103 } 2104 2105 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, 2106 const struct sw_flow_key *key, 2107 struct sw_flow_actions **sfa, 2108 __be16 eth_type, __be16 vlan_tci, bool log); 2109 2110 static int validate_and_copy_sample(struct net *net, const struct nlattr *attr, 2111 const struct sw_flow_key *key, 2112 struct sw_flow_actions **sfa, 2113 __be16 eth_type, __be16 vlan_tci, 2114 bool log, bool last) 2115 { 2116 const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; 2117 const struct nlattr *probability, *actions; 2118 const struct nlattr *a; 2119 int rem, start, err; 2120 struct sample_arg arg; 2121 2122 memset(attrs, 0, sizeof(attrs)); 2123 nla_for_each_nested(a, attr, rem) { 2124 int type = nla_type(a); 2125 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type]) 2126 return -EINVAL; 2127 attrs[type] = a; 2128 } 2129 if (rem) 2130 return -EINVAL; 2131 2132 probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY]; 2133 if (!probability || nla_len(probability) != sizeof(u32)) 2134 return -EINVAL; 2135 2136 actions = 
attrs[OVS_SAMPLE_ATTR_ACTIONS]; 2137 if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN)) 2138 return -EINVAL; 2139 2140 /* validation done, copy sample action. */ 2141 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log); 2142 if (start < 0) 2143 return start; 2144 2145 /* When both skb and flow may be changed, put the sample 2146 * into a deferred fifo. On the other hand, if only skb 2147 * may be modified, the actions can be executed in place. 2148 * 2149 * Do this analysis at flow installation time. 2150 * Set 'clone_action->exec' to true if the actions can be 2151 * executed without being deferred. 2152 * 2153 * If the sample is the last action, it can always be executed 2154 * rather than deferred. 2155 */ 2156 arg.exec = last || !actions_may_change_flow(actions); 2157 arg.probability = nla_get_u32(probability); 2158 2159 err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_ARG, &arg, sizeof(arg), 2160 log); 2161 if (err) 2162 return err; 2163 2164 err = __ovs_nla_copy_actions(net, actions, key, sfa, 2165 eth_type, vlan_tci, log); 2166 2167 if (err) 2168 return err; 2169 2170 add_nested_action_end(*sfa, start); 2171 2172 return 0; 2173 } 2174 2175 void ovs_match_init(struct sw_flow_match *match, 2176 struct sw_flow_key *key, 2177 bool reset_key, 2178 struct sw_flow_mask *mask) 2179 { 2180 memset(match, 0, sizeof(*match)); 2181 match->key = key; 2182 match->mask = mask; 2183 2184 if (reset_key) 2185 memset(key, 0, sizeof(*key)); 2186 2187 if (mask) { 2188 memset(&mask->key, 0, sizeof(mask->key)); 2189 mask->range.start = mask->range.end = 0; 2190 } 2191 } 2192 2193 static int validate_geneve_opts(struct sw_flow_key *key) 2194 { 2195 struct geneve_opt *option; 2196 int opts_len = key->tun_opts_len; 2197 bool crit_opt = false; 2198 2199 option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len); 2200 while (opts_len > 0) { 2201 int len; 2202 2203 if (opts_len < sizeof(*option)) 2204 return -EINVAL; 2205 2206 len = sizeof(*option) + option->length * 4; 2207 if (len > opts_len) 2208 return -EINVAL; 2209 2210 crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE); 2211 2212 option = (struct geneve_opt *)((u8 *)option + len); 2213 opts_len -= len; 2214 } 2215 2216 key->tun_key.tun_flags |= crit_opt ?
TUNNEL_CRIT_OPT : 0; 2217 2218 return 0; 2219 } 2220 2221 static int validate_and_copy_set_tun(const struct nlattr *attr, 2222 struct sw_flow_actions **sfa, bool log) 2223 { 2224 struct sw_flow_match match; 2225 struct sw_flow_key key; 2226 struct metadata_dst *tun_dst; 2227 struct ip_tunnel_info *tun_info; 2228 struct ovs_tunnel_info *ovs_tun; 2229 struct nlattr *a; 2230 int err = 0, start, opts_type; 2231 2232 ovs_match_init(&match, &key, true, NULL); 2233 opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log); 2234 if (opts_type < 0) 2235 return opts_type; 2236 2237 if (key.tun_opts_len) { 2238 switch (opts_type) { 2239 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: 2240 err = validate_geneve_opts(&key); 2241 if (err < 0) 2242 return err; 2243 break; 2244 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: 2245 break; 2246 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: 2247 break; 2248 } 2249 } 2250 2251 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log); 2252 if (start < 0) 2253 return start; 2254 2255 tun_dst = metadata_dst_alloc(key.tun_opts_len, METADATA_IP_TUNNEL, 2256 GFP_KERNEL); 2257 2258 if (!tun_dst) 2259 return -ENOMEM; 2260 2261 err = dst_cache_init(&tun_dst->u.tun_info.dst_cache, GFP_KERNEL); 2262 if (err) { 2263 dst_release((struct dst_entry *)tun_dst); 2264 return err; 2265 } 2266 2267 a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL, 2268 sizeof(*ovs_tun), log); 2269 if (IS_ERR(a)) { 2270 dst_release((struct dst_entry *)tun_dst); 2271 return PTR_ERR(a); 2272 } 2273 2274 ovs_tun = nla_data(a); 2275 ovs_tun->tun_dst = tun_dst; 2276 2277 tun_info = &tun_dst->u.tun_info; 2278 tun_info->mode = IP_TUNNEL_INFO_TX; 2279 if (key.tun_proto == AF_INET6) 2280 tun_info->mode |= IP_TUNNEL_INFO_IPV6; 2281 tun_info->key = key.tun_key; 2282 2283 /* We need to store the options in the action itself since 2284 * everything else will go away after flow setup. We can append 2285 * them to tun_info and then point there. 2286 */ 2287 ip_tunnel_info_opts_set(tun_info, 2288 TUN_METADATA_OPTS(&key, key.tun_opts_len), 2289 key.tun_opts_len); 2290 add_nested_action_end(*sfa, start); 2291 2292 return err; 2293 } 2294 2295 /* Return false if there are any non-masked bits set. 2296 * Mask follows data immediately, before any netlink padding.
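 * For example, a masked set of OVS_KEY_ATTR_PRIORITY carries 8 bytes of payload: 4 bytes of value followed by 4 bytes of mask, and it passes only if (value & ~mask) == 0 for every byte.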
*/ 2298 static bool validate_masked(u8 *data, int len) 2299 { 2300 u8 *mask = data + len; 2301 2302 while (len--) 2303 if (*data++ & ~*mask++) 2304 return false; 2305 2306 return true; 2307 } 2308 2309 static int validate_set(const struct nlattr *a, 2310 const struct sw_flow_key *flow_key, 2311 struct sw_flow_actions **sfa, bool *skip_copy, 2312 u8 mac_proto, __be16 eth_type, bool masked, bool log) 2313 { 2314 const struct nlattr *ovs_key = nla_data(a); 2315 int key_type = nla_type(ovs_key); 2316 size_t key_len; 2317 2318 /* There can be only one key in an action */ 2319 if (nla_total_size(nla_len(ovs_key)) != nla_len(a)) 2320 return -EINVAL; 2321 2322 key_len = nla_len(ovs_key); 2323 if (masked) 2324 key_len /= 2; 2325 2326 if (key_type > OVS_KEY_ATTR_MAX || 2327 !check_attr_len(key_len, ovs_key_lens[key_type].len)) 2328 return -EINVAL; 2329 2330 if (masked && !validate_masked(nla_data(ovs_key), key_len)) 2331 return -EINVAL; 2332 2333 switch (key_type) { 2334 const struct ovs_key_ipv4 *ipv4_key; 2335 const struct ovs_key_ipv6 *ipv6_key; 2336 int err; 2337 2338 case OVS_KEY_ATTR_PRIORITY: 2339 case OVS_KEY_ATTR_SKB_MARK: 2340 case OVS_KEY_ATTR_CT_MARK: 2341 case OVS_KEY_ATTR_CT_LABELS: 2342 break; 2343 2344 case OVS_KEY_ATTR_ETHERNET: 2345 if (mac_proto != MAC_PROTO_ETHERNET) 2346 return -EINVAL; 2347 break; 2348 2349 case OVS_KEY_ATTR_TUNNEL: 2350 if (masked) 2351 return -EINVAL; /* Masked tunnel set not supported. */ 2352 2353 *skip_copy = true; 2354 err = validate_and_copy_set_tun(a, sfa, log); 2355 if (err) 2356 return err; 2357 break; 2358 2359 case OVS_KEY_ATTR_IPV4: 2360 if (eth_type != htons(ETH_P_IP)) 2361 return -EINVAL; 2362 2363 ipv4_key = nla_data(ovs_key); 2364 2365 if (masked) { 2366 const struct ovs_key_ipv4 *mask = ipv4_key + 1; 2367 2368 /* Non-writeable fields. */ 2369 if (mask->ipv4_proto || mask->ipv4_frag) 2370 return -EINVAL; 2371 } else { 2372 if (ipv4_key->ipv4_proto != flow_key->ip.proto) 2373 return -EINVAL; 2374 2375 if (ipv4_key->ipv4_frag != flow_key->ip.frag) 2376 return -EINVAL; 2377 } 2378 break; 2379 2380 case OVS_KEY_ATTR_IPV6: 2381 if (eth_type != htons(ETH_P_IPV6)) 2382 return -EINVAL; 2383 2384 ipv6_key = nla_data(ovs_key); 2385 2386 if (masked) { 2387 const struct ovs_key_ipv6 *mask = ipv6_key + 1; 2388 2389 /* Non-writeable fields. */ 2390 if (mask->ipv6_proto || mask->ipv6_frag) 2391 return -EINVAL; 2392 2393 /* Invalid bits in the flow label mask?
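 * The IPv6 flow label is only 20 bits wide, so the top 12 bits of the 32-bit attribute can never correspond to real header bits.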
*/ 2394 if (ntohl(mask->ipv6_label) & 0xFFF00000) 2395 return -EINVAL; 2396 } else { 2397 if (ipv6_key->ipv6_proto != flow_key->ip.proto) 2398 return -EINVAL; 2399 2400 if (ipv6_key->ipv6_frag != flow_key->ip.frag) 2401 return -EINVAL; 2402 } 2403 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000) 2404 return -EINVAL; 2405 2406 break; 2407 2408 case OVS_KEY_ATTR_TCP: 2409 if ((eth_type != htons(ETH_P_IP) && 2410 eth_type != htons(ETH_P_IPV6)) || 2411 flow_key->ip.proto != IPPROTO_TCP) 2412 return -EINVAL; 2413 2414 break; 2415 2416 case OVS_KEY_ATTR_UDP: 2417 if ((eth_type != htons(ETH_P_IP) && 2418 eth_type != htons(ETH_P_IPV6)) || 2419 flow_key->ip.proto != IPPROTO_UDP) 2420 return -EINVAL; 2421 2422 break; 2423 2424 case OVS_KEY_ATTR_MPLS: 2425 if (!eth_p_mpls(eth_type)) 2426 return -EINVAL; 2427 break; 2428 2429 case OVS_KEY_ATTR_SCTP: 2430 if ((eth_type != htons(ETH_P_IP) && 2431 eth_type != htons(ETH_P_IPV6)) || 2432 flow_key->ip.proto != IPPROTO_SCTP) 2433 return -EINVAL; 2434 2435 break; 2436 2437 default: 2438 return -EINVAL; 2439 } 2440 2441 /* Convert non-masked non-tunnel set actions to masked set actions. */ 2442 if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) { 2443 int start, len = key_len * 2; 2444 struct nlattr *at; 2445 2446 *skip_copy = true; 2447 2448 start = add_nested_action_start(sfa, 2449 OVS_ACTION_ATTR_SET_TO_MASKED, 2450 log); 2451 if (start < 0) 2452 return start; 2453 2454 at = __add_action(sfa, key_type, NULL, len, log); 2455 if (IS_ERR(at)) 2456 return PTR_ERR(at); 2457 2458 memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */ 2459 memset(nla_data(at) + key_len, 0xff, key_len); /* Mask. */ 2460 /* Clear non-writeable bits from otherwise writeable fields. */ 2461 if (key_type == OVS_KEY_ATTR_IPV6) { 2462 struct ovs_key_ipv6 *mask = nla_data(at) + key_len; 2463 2464 mask->ipv6_label &= htonl(0x000FFFFF); 2465 } 2466 add_nested_action_end(*sfa, start); 2467 } 2468 2469 return 0; 2470 } 2471 2472 static int validate_userspace(const struct nlattr *attr) 2473 { 2474 static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { 2475 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, 2476 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC }, 2477 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 }, 2478 }; 2479 struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; 2480 int error; 2481 2482 error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr, 2483 userspace_policy, NULL); 2484 if (error) 2485 return error; 2486 2487 if (!a[OVS_USERSPACE_ATTR_PID] || 2488 !nla_get_u32(a[OVS_USERSPACE_ATTR_PID])) 2489 return -EINVAL; 2490 2491 return 0; 2492 } 2493 2494 static int copy_action(const struct nlattr *from, 2495 struct sw_flow_actions **sfa, bool log) 2496 { 2497 int totlen = NLA_ALIGN(from->nla_len); 2498 struct nlattr *to; 2499 2500 to = reserve_sfa_size(sfa, from->nla_len, log); 2501 if (IS_ERR(to)) 2502 return PTR_ERR(to); 2503 2504 memcpy(to, from, totlen); 2505 return 0; 2506 } 2507 2508 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, 2509 const struct sw_flow_key *key, 2510 struct sw_flow_actions **sfa, 2511 __be16 eth_type, __be16 vlan_tci, bool log) 2512 { 2513 u8 mac_proto = ovs_key_mac_proto(key); 2514 const struct nlattr *a; 2515 int rem, err; 2516 2517 nla_for_each_nested(a, attr, rem) { 2518 /* Expected argument lengths, (u32)-1 for variable length. 
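 * Fixed-size entries must match nla_len() exactly; (u32)-1 entries are length-checked by their per-action handlers below.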
*/ 2519 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { 2520 [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), 2521 [OVS_ACTION_ATTR_RECIRC] = sizeof(u32), 2522 [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, 2523 [OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls), 2524 [OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16), 2525 [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), 2526 [OVS_ACTION_ATTR_POP_VLAN] = 0, 2527 [OVS_ACTION_ATTR_SET] = (u32)-1, 2528 [OVS_ACTION_ATTR_SET_MASKED] = (u32)-1, 2529 [OVS_ACTION_ATTR_SAMPLE] = (u32)-1, 2530 [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash), 2531 [OVS_ACTION_ATTR_CT] = (u32)-1, 2532 [OVS_ACTION_ATTR_CT_CLEAR] = 0, 2533 [OVS_ACTION_ATTR_TRUNC] = sizeof(struct ovs_action_trunc), 2534 [OVS_ACTION_ATTR_PUSH_ETH] = sizeof(struct ovs_action_push_eth), 2535 [OVS_ACTION_ATTR_POP_ETH] = 0, 2536 }; 2537 const struct ovs_action_push_vlan *vlan; 2538 int type = nla_type(a); 2539 bool skip_copy; 2540 2541 if (type > OVS_ACTION_ATTR_MAX || 2542 (action_lens[type] != nla_len(a) && 2543 action_lens[type] != (u32)-1)) 2544 return -EINVAL; 2545 2546 skip_copy = false; 2547 switch (type) { 2548 case OVS_ACTION_ATTR_UNSPEC: 2549 return -EINVAL; 2550 2551 case OVS_ACTION_ATTR_USERSPACE: 2552 err = validate_userspace(a); 2553 if (err) 2554 return err; 2555 break; 2556 2557 case OVS_ACTION_ATTR_OUTPUT: 2558 if (nla_get_u32(a) >= DP_MAX_PORTS) 2559 return -EINVAL; 2560 break; 2561 2562 case OVS_ACTION_ATTR_TRUNC: { 2563 const struct ovs_action_trunc *trunc = nla_data(a); 2564 2565 if (trunc->max_len < ETH_HLEN) 2566 return -EINVAL; 2567 break; 2568 } 2569 2570 case OVS_ACTION_ATTR_HASH: { 2571 const struct ovs_action_hash *act_hash = nla_data(a); 2572 2573 switch (act_hash->hash_alg) { 2574 case OVS_HASH_ALG_L4: 2575 break; 2576 default: 2577 return -EINVAL; 2578 } 2579 2580 break; 2581 } 2582 2583 case OVS_ACTION_ATTR_POP_VLAN: 2584 if (mac_proto != MAC_PROTO_ETHERNET) 2585 return -EINVAL; 2586 vlan_tci = htons(0); 2587 break; 2588 2589 case OVS_ACTION_ATTR_PUSH_VLAN: 2590 if (mac_proto != MAC_PROTO_ETHERNET) 2591 return -EINVAL; 2592 vlan = nla_data(a); 2593 if (!eth_type_vlan(vlan->vlan_tpid)) 2594 return -EINVAL; 2595 if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) 2596 return -EINVAL; 2597 vlan_tci = vlan->vlan_tci; 2598 break; 2599 2600 case OVS_ACTION_ATTR_RECIRC: 2601 break; 2602 2603 case OVS_ACTION_ATTR_PUSH_MPLS: { 2604 const struct ovs_action_push_mpls *mpls = nla_data(a); 2605 2606 if (!eth_p_mpls(mpls->mpls_ethertype)) 2607 return -EINVAL; 2608 /* Prohibit push MPLS other than to a white list 2609 * for packets that have a known tag order. 2610 */ 2611 if (vlan_tci & htons(VLAN_TAG_PRESENT) || 2612 (eth_type != htons(ETH_P_IP) && 2613 eth_type != htons(ETH_P_IPV6) && 2614 eth_type != htons(ETH_P_ARP) && 2615 eth_type != htons(ETH_P_RARP) && 2616 !eth_p_mpls(eth_type))) 2617 return -EINVAL; 2618 eth_type = mpls->mpls_ethertype; 2619 break; 2620 } 2621 2622 case OVS_ACTION_ATTR_POP_MPLS: 2623 if (vlan_tci & htons(VLAN_TAG_PRESENT) || 2624 !eth_p_mpls(eth_type)) 2625 return -EINVAL; 2626 2627 /* Disallow subsequent L2.5+ set and mpls_pop actions 2628 * as there is no check here to ensure that the new 2629 * eth_type is valid and thus set actions could 2630 * write off the end of the packet or otherwise 2631 * corrupt it. 2632 * 2633 * Support for these actions is planned using packet 2634 * recirculation. 
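 * (Zeroing eth_type below makes every subsequent ethertype check in this loop fail, which is what enforces the restriction.)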
*/ 2636 eth_type = htons(0); 2637 break; 2638 2639 case OVS_ACTION_ATTR_SET: 2640 err = validate_set(a, key, sfa, 2641 &skip_copy, mac_proto, eth_type, 2642 false, log); 2643 if (err) 2644 return err; 2645 break; 2646 2647 case OVS_ACTION_ATTR_SET_MASKED: 2648 err = validate_set(a, key, sfa, 2649 &skip_copy, mac_proto, eth_type, 2650 true, log); 2651 if (err) 2652 return err; 2653 break; 2654 2655 case OVS_ACTION_ATTR_SAMPLE: { 2656 bool last = nla_is_last(a, rem); 2657 2658 err = validate_and_copy_sample(net, a, key, sfa, 2659 eth_type, vlan_tci, 2660 log, last); 2661 if (err) 2662 return err; 2663 skip_copy = true; 2664 break; 2665 } 2666 2667 case OVS_ACTION_ATTR_CT: 2668 err = ovs_ct_copy_action(net, a, key, sfa, log); 2669 if (err) 2670 return err; 2671 skip_copy = true; 2672 break; 2673 2674 case OVS_ACTION_ATTR_CT_CLEAR: 2675 break; 2676 2677 case OVS_ACTION_ATTR_PUSH_ETH: 2678 /* Disallow pushing an Ethernet header if one 2679 * is already present */ 2680 if (mac_proto != MAC_PROTO_NONE) 2681 return -EINVAL; 2682 mac_proto = MAC_PROTO_ETHERNET; 2683 break; 2684 2685 case OVS_ACTION_ATTR_POP_ETH: 2686 if (mac_proto != MAC_PROTO_ETHERNET) 2687 return -EINVAL; 2688 if (vlan_tci & htons(VLAN_TAG_PRESENT)) 2689 return -EINVAL; 2690 mac_proto = MAC_PROTO_NONE; 2691 break; 2692 2693 default: 2694 OVS_NLERR(log, "Unknown Action type %d", type); 2695 return -EINVAL; 2696 } 2697 if (!skip_copy) { 2698 err = copy_action(a, sfa, log); 2699 if (err) 2700 return err; 2701 } 2702 } 2703 2704 if (rem > 0) 2705 return -EINVAL; 2706 2707 return 0; 2708 } 2709 2710 /* 'key' must be the masked key. */ 2711 int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, 2712 const struct sw_flow_key *key, 2713 struct sw_flow_actions **sfa, bool log) 2714 { 2715 int err; 2716 2717 *sfa = nla_alloc_flow_actions(nla_len(attr), log); 2718 if (IS_ERR(*sfa)) 2719 return PTR_ERR(*sfa); 2720 2721 (*sfa)->orig_len = nla_len(attr); 2722 err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type, 2723 key->eth.vlan.tci, log); 2724 if (err) 2725 ovs_nla_free_flow_actions(*sfa); 2726 2727 return err; 2728 } 2729 2730 static int sample_action_to_attr(const struct nlattr *attr, 2731 struct sk_buff *skb) 2732 { 2733 struct nlattr *start, *ac_start = NULL, *sample_arg; 2734 int err = 0, rem = nla_len(attr); 2735 const struct sample_arg *arg; 2736 struct nlattr *actions; 2737 2738 start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE); 2739 if (!start) 2740 return -EMSGSIZE; 2741 2742 sample_arg = nla_data(attr); 2743 arg = nla_data(sample_arg); 2744 actions = nla_next(sample_arg, &rem); 2745 2746 if (nla_put_u32(skb, OVS_SAMPLE_ATTR_PROBABILITY, arg->probability)) { 2747 err = -EMSGSIZE; 2748 goto out; 2749 } 2750 2751 ac_start = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS); 2752 if (!ac_start) { 2753 err = -EMSGSIZE; 2754 goto out; 2755 } 2756 2757 err = ovs_nla_put_actions(actions, rem, skb); 2758 2759 out: 2760 if (err) { 2761 nla_nest_cancel(skb, ac_start); 2762 nla_nest_cancel(skb, start); 2763 } else { 2764 nla_nest_end(skb, ac_start); 2765 nla_nest_end(skb, start); 2766 } 2767 2768 return err; 2769 } 2770 2771 static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb) 2772 { 2773 const struct nlattr *ovs_key = nla_data(a); 2774 int key_type = nla_type(ovs_key); 2775 struct nlattr *start; 2776 int err; 2777 2778 switch (key_type) { 2779 case OVS_KEY_ATTR_TUNNEL_INFO: { 2780 struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key); 2781 struct ip_tunnel_info *tun_info =
&ovs_tun->tun_dst->u.tun_info; 2782 2783 start = nla_nest_start(skb, OVS_ACTION_ATTR_SET); 2784 if (!start) 2785 return -EMSGSIZE; 2786 2787 err = ip_tun_to_nlattr(skb, &tun_info->key, 2788 ip_tunnel_info_opts(tun_info), 2789 tun_info->options_len, 2790 ip_tunnel_info_af(tun_info)); 2791 if (err) 2792 return err; 2793 nla_nest_end(skb, start); 2794 break; 2795 } 2796 default: 2797 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key)) 2798 return -EMSGSIZE; 2799 break; 2800 } 2801 2802 return 0; 2803 } 2804 2805 static int masked_set_action_to_set_action_attr(const struct nlattr *a, 2806 struct sk_buff *skb) 2807 { 2808 const struct nlattr *ovs_key = nla_data(a); 2809 struct nlattr *nla; 2810 size_t key_len = nla_len(ovs_key) / 2; 2811 2812 /* Revert the conversion we did from a non-masked set action to 2813 * masked set action. 2814 */ 2815 nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET); 2816 if (!nla) 2817 return -EMSGSIZE; 2818 2819 if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key))) 2820 return -EMSGSIZE; 2821 2822 nla_nest_end(skb, nla); 2823 return 0; 2824 } 2825 2826 int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb) 2827 { 2828 const struct nlattr *a; 2829 int rem, err; 2830 2831 nla_for_each_attr(a, attr, len, rem) { 2832 int type = nla_type(a); 2833 2834 switch (type) { 2835 case OVS_ACTION_ATTR_SET: 2836 err = set_action_to_attr(a, skb); 2837 if (err) 2838 return err; 2839 break; 2840 2841 case OVS_ACTION_ATTR_SET_TO_MASKED: 2842 err = masked_set_action_to_set_action_attr(a, skb); 2843 if (err) 2844 return err; 2845 break; 2846 2847 case OVS_ACTION_ATTR_SAMPLE: 2848 err = sample_action_to_attr(a, skb); 2849 if (err) 2850 return err; 2851 break; 2852 2853 case OVS_ACTION_ATTR_CT: 2854 err = ovs_ct_action_to_attr(nla_data(a), skb); 2855 if (err) 2856 return err; 2857 break; 2858 2859 default: 2860 if (nla_put(skb, type, nla_len(a), nla_data(a))) 2861 return -EMSGSIZE; 2862 break; 2863 } 2864 } 2865 2866 return 0; 2867 } 2868
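/* A minimal usage sketch, not part of the datapath; identifiers such as
 * 'masked_key', 'acts', 'log' and the surrounding error handling are
 * illustrative only. Actions validated and copied in through
 * ovs_nla_copy_actions() are later re-serialized for userspace with
 * ovs_nla_put_actions(), which undoes internal-only encodings such as
 * OVS_ACTION_ATTR_SET_TO_MASKED:
 *
 *	struct sw_flow_actions *acts;
 *	int err;
 *
 *	err = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
 *				   &masked_key, &acts, log);
 *	if (err)
 *		return err;
 *	...
 *	err = ovs_nla_put_actions(acts->actions, acts->actions_len, skb);
 */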