// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
			    struct nfp_flower_meta_tci *msk,
			    struct tc_cls_flower_offload *flow, u8 key_type)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	u16 tmp_tci;

	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

	/* Populate the metadata frame. */
	ext->nfp_flow_key_layer = key_type;
	ext->mask_id = ~0;

	msk->nfp_flow_key_layer = key_type;
	msk->mask_id = ~0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		/* Populate the tci field. */
		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.key->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.key->vlan_id);
		ext->tci = cpu_to_be16(tmp_tci);

		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.mask->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.mask->vlan_id);
		msk->tci = cpu_to_be16(tmp_tci);
	}
}
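/* Sketch of the TCI halfword built above, assuming the usual cmsg.h field
 * definitions (NFP_FLOWER_MASK_VLAN_PRIO in bits 15:13,
 * NFP_FLOWER_MASK_VLAN_PRESENT in bit 12, NFP_FLOWER_MASK_VLAN_VID in
 * bits 11:0 - i.e. the 802.1Q TCI layout with the DEI bit repurposed as a
 * "VLAN present" flag):
 *
 *   15      13  12         11                       0
 *  +----------+----------+--------------------------+
 *  |   PRIO   | PRESENT  |           VID            |
 *  +----------+----------+--------------------------+
 *
 * e.g. a rule matching VID 100 at priority 3 would build
 * PRESENT | FIELD_PREP(PRIO, 3) | FIELD_PREP(VID, 100), which under those
 * bit positions is 0x7064, byte-swapped to big endian before being
 * written into the match key.
 */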
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type)
{
	if (mask_version) {
		frame->in_port = cpu_to_be32(~0);
		return 0;
	}

	if (tun_type) {
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else {
		if (!cmsg_port)
			return -EOPNOTSUPP;
		frame->in_port = cpu_to_be32(cmsg_port);
	}

	return 0;
}
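/* Note on the in_port encoding above: the mask half of the key is always
 * written as all-ones, so the ingress port is matched exactly. For flows
 * that ingress from a tunnel the port is instead encoded as
 * NFP_FL_PORT_TYPE_TUN OR'ed with the tunnel type; otherwise the control
 * message port id is used verbatim, and a zero port id is rejected with
 * -EOPNOTSUPP so the flow is not offloaded.
 */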
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
		       struct nfp_flower_mac_mpls *msk,
		       struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		/* Populate mac frame. */
		ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
		ether_addr_copy(ext->mac_src, &match.key->src[0]);
		ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
		ether_addr_copy(msk->mac_src, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u32 t_mpls;

		flow_rule_match_mpls(rule, &match);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		ext->mpls_lse = cpu_to_be32(t_mpls);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		msk->mpls_lse = cpu_to_be32(t_mpls);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
		 * bit, which indicates an mpls ether type but without any
		 * mpls fields.
		 */
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
			ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
			msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
		}
	}
}

static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
			 struct nfp_flower_tp_ports *msk,
			 struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
	memset(msk, 0, sizeof(struct nfp_flower_tp_ports));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		ext->port_src = match.key->src;
		ext->port_dst = match.key->dst;
		msk->port_src = match.mask->src;
		msk->port_dst = match.mask->dst;
	}
}

static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
			  struct nfp_flower_ip_ext *msk,
			  struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ext->proto = match.key->ip_proto;
		msk->proto = match.mask->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		u16 tcp_flags, tcp_flags_mask;
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		tcp_flags = be16_to_cpu(match.key->flags);
		tcp_flags_mask = be16_to_cpu(match.mask->flags);

		if (tcp_flags & TCPHDR_FIN)
			ext->flags |= NFP_FL_TCP_FLAG_FIN;
		if (tcp_flags_mask & TCPHDR_FIN)
			msk->flags |= NFP_FL_TCP_FLAG_FIN;

		if (tcp_flags & TCPHDR_SYN)
			ext->flags |= NFP_FL_TCP_FLAG_SYN;
		if (tcp_flags_mask & TCPHDR_SYN)
			msk->flags |= NFP_FL_TCP_FLAG_SYN;

		if (tcp_flags & TCPHDR_RST)
			ext->flags |= NFP_FL_TCP_FLAG_RST;
		if (tcp_flags_mask & TCPHDR_RST)
			msk->flags |= NFP_FL_TCP_FLAG_RST;

		if (tcp_flags & TCPHDR_PSH)
			ext->flags |= NFP_FL_TCP_FLAG_PSH;
		if (tcp_flags_mask & TCPHDR_PSH)
			msk->flags |= NFP_FL_TCP_FLAG_PSH;

		if (tcp_flags & TCPHDR_URG)
			ext->flags |= NFP_FL_TCP_FLAG_URG;
		if (tcp_flags_mask & TCPHDR_URG)
			msk->flags |= NFP_FL_TCP_FLAG_URG;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
			ext->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
			msk->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.key->flags & FLOW_DIS_FIRST_FRAG)
			ext->flags |= NFP_FL_IP_FRAG_FIRST;
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			msk->flags |= NFP_FL_IP_FRAG_FIRST;
	}
}
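/* Unlike the other compile helpers, nfp_flower_compile_ip_ext() above does
 * not memset() its output: it fills the nfp_flower_ip_ext block embedded
 * in both the IPv4 and IPv6 keys, and the callers below zero the enclosing
 * structure before invoking it.
 */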
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
			struct nfp_flower_ipv4 *msk,
			struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	struct flow_match_ipv4_addrs match;

	memset(ext, 0, sizeof(struct nfp_flower_ipv4));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		flow_rule_match_ipv4_addrs(rule, &match);
		ext->ipv4_src = match.key->src;
		ext->ipv4_dst = match.key->dst;
		msk->ipv4_src = match.mask->src;
		msk->ipv4_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
			struct nfp_flower_ipv6 *msk,
			struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_ipv6));
	memset(msk, 0, sizeof(struct nfp_flower_ipv6));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		ext->ipv6_src = match.key->src;
		ext->ipv6_dst = match.key->dst;
		msk->ipv6_src = match.mask->src;
		msk->ipv6_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

static int
nfp_flower_compile_geneve_opt(void *ext, void *msk,
			      struct tc_cls_flower_offload *flow)
{
	struct flow_match_enc_opts match;

	flow_rule_match_enc_opts(flow->rule, &match);
	memcpy(ext, match.key->data, match.key->len);
	memcpy(msk, match.mask->data, match.mask->len);

	return 0;
}

static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
				struct nfp_flower_ipv4_udp_tun *msk,
				struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;
		u32 temp_vni;

		flow_rule_match_enc_keyid(rule, &match);
		temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
		ext->tun_id = cpu_to_be32(temp_vni);
		temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
		msk->tun_id = cpu_to_be32(temp_vni);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		ext->ip_src = match.key->src;
		ext->ip_dst = match.key->dst;
		msk->ip_src = match.mask->src;
		msk->ip_dst = match.mask->dst;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}
}
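/* Build the match key and its mask for a single flow. The key is written
 * as a sequence of fixed-size blocks in the order the firmware expects:
 * metadata/TCI, optional extended metadata, the ingress port, then the
 * MAC/MPLS, transport-port, IPv4/IPv6 and UDP-tunnel blocks selected by
 * key_ls->key_layer (and key_layer_two). The ext and msk cursors advance
 * in lockstep through the unmasked and mask halves of the key.
 */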
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct tc_cls_flower_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type)
{
	u32 port_id;
	int err;
	u8 *ext;
	u8 *msk;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    (struct nfp_flower_meta_tci *)msk,
				    flow, key_ls->key_layer);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

	/* Populate Extended Metadata if Required. */
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

	/* Populate Exact Port data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      port_id, false, tun_type);
	if (err)
		return err;

	/* Populate Mask Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      port_id, true, tun_type);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
				       (struct nfp_flower_mac_mpls *)msk,
				       flow);
		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 (struct nfp_flower_tp_ports *)msk,
					 flow);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					(struct nfp_flower_ipv4 *)msk,
					flow);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					(struct nfp_flower_ipv6 *)msk,
					flow);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		__be32 tun_dst;

		nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
		tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
		msk += sizeof(struct nfp_flower_ipv4_udp_tun);

		/* Store the tunnel destination in the rule data.
		 * This must be present and be an exact match.
		 */
		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
		nfp_tunnel_add_ipv4_off(app, tun_dst);

		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			err = nfp_flower_compile_geneve_opt(ext, msk, flow);
			if (err)
				return err;
		}
	}

	return 0;
}