/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided below.
 * You have the option to license this software under the complete terms of
 * either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
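/**
 * nfp_flower_xmit_flow() - Sends a flow rule to the firmware.
 * @netdev: netdev structure.
 * @nfp_flow: Flow payload to transmit, with lengths stored in bytes.
 * @mtype: Control message type, e.g. NFP_FLOWER_CMSG_TYPE_FLOW_ADD.
 *
 * Builds a control message carrying the flow metadata, unmasked key,
 * mask and action data, and sends it to the firmware over the control
 * channel.
 *
 * Return: negative value on error, 0 if the message was sent.
 */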
static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}

static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}
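/**
 * nfp_flower_calculate_key_layers() - Calculate required match key layers.
 * @ret_key_ls: Key layer structure to populate.
 * @flow: TC flower classifier offload structure.
 *
 * Checks the dissector keys used by the rule against the supported set
 * and works out which key layers, and how much key space, the firmware
 * representation of the match requires.
 *
 * Return: negative value if the match cannot be offloaded, 0 on success.
 */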
static int
nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct flow_dissector_key_ip *mask_ip = NULL;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tun dissector is used then the required set must be used. */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
	key_size = sizeof(struct nfp_flower_meta_one) +
		   sizeof(struct nfp_flower_in_port) +
		   sizeof(struct nfp_flower_mac_mpls);

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);
		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0) ||
		    enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
			return -EOPNOTSUPP;

		key_layer |= NFP_FLOWER_LAYER_VXLAN;
		key_size += sizeof(struct nfp_flower_vxlan);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
		mask_ip = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_IP,
						    flow->mask);

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			if (mask_ip && mask_ip->tos)
				return -EOPNOTSUPP;
			if (mask_ip && mask_ip->ttl)
				return -EOPNOTSUPP;
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			if (mask_ip && mask_ip->tos)
				return -EOPNOTSUPP;
			if (mask_ip && mask_ip->ttl)
				return -EOPNOTSUPP;
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		/* Currently we do not offload MPLS. */
		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			return -EOPNOTSUPP;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}
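/**
 * nfp_flower_allocate_new() - Allocates a new flow payload.
 * @key_layer: Key layer information used to size the key and mask buffers.
 *
 * Return: pointer to the newly allocated flow payload, NULL on failure.
 */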
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	spin_lock_init(&flow_pay->lock);

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	int err;

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(key_layer, flow);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	INIT_HLIST_NODE(&flow_pay->link);
	flow_pay->tc_flower_cookie = flow->cookie;
	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_fl_payload *nfp_flow;
	int err;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
	if (!nfp_flow)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

err_free_flow:
	hash_del_rcu(&nfp_flow->link);
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app: Pointer to the APP handle
 * @flow: TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
{
	struct nfp_fl_payload *nfp_flow;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
	if (!nfp_flow)
		return -EINVAL;

	spin_lock_bh(&nfp_flow->lock);
	tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
			      nfp_flow->stats.pkts, nfp_flow->stats.used);

	nfp_flow->stats.pkts = 0;
	nfp_flow->stats.bytes = 0;
	spin_unlock_bh(&nfp_flow->lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower)
{
	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, flower);
	}

	return -EOPNOTSUPP;
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	struct tc_cls_flower_offload *cls_flower = type_data;

	if (type != TC_SETUP_CLSFLOWER ||
	    !is_classid_clsact_ingress(cls_flower->common.classid) ||
	    !eth_proto_is_802_3(cls_flower->common.protocol) ||
	    cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	return nfp_flower_repr_offload(app, netdev, cls_flower);
}
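
/* Illustrative example (not part of the driver): a TC flower rule of the
 * following shape only uses dissector keys from
 * NFP_FLOWER_WHITELIST_DISSECTOR, so it reaches nfp_flower_repr_offload()
 * as a TC_CLSFLOWER_REPLACE command and is a candidate for offload. The
 * netdev name eth0 is a placeholder for a representor netdev.
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 */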