/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Message payload layout: metadata, unmasked key, mask, actions. */
	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}
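/* Returns true if the flow matches on any field above the MAC layer
 * (L3 addresses, L4 ports or ICMP); used to reject flows with an
 * unhandled EtherType that still match on deeper headers.
 */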
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_PORTS) ||
	       dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}
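/**
 * nfp_flower_calculate_key_layers() - Determine the key layers for a flow.
 * @app: Pointer to the APP handle
 * @ret_key_ls: Filled with the key layer flags and total key size
 * @flow: TC flower classifier offload structure
 * @egress: NFP netdev is the egress
 * @tun_type: Set to the tunnel type derived from the match
 *
 * Verifies that the dissector keys and masks used by @flow are supported
 * for offload and computes which key layers, and how much key space, the
 * hardware match will need.
 *
 * Return: negative value on error, 0 if the flow can be offloaded.
 */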
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tun dissector is used then the required set must be used. */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);

		if (!egress)
			return -EOPNOTSUPP;

		/* Only IPv4 tunnels with a fully masked addr_type are
		 * supported.
		 */
		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		/* The tunnel type is derived from the UDP destination port. */
		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non tunnel matches offloaded to egress repr. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other IP protocols cannot be offloaded. */
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}
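/* Allocate a flow payload with key and mask buffers sized from the
 * pre-calculated key layer information, and an action buffer of the
 * maximum supported size (NFP_FL_MAX_A_SIZ).
 */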
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	spin_lock_init(&flow_pay->lock);

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure.
 * @egress: NFP netdev is the egress.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	int err;

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	INIT_HLIST_NODE(&flow_pay->link);
	flow_pay->tc_flower_cookie = flow->cookie;
	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
	port->tc_offload_cnt++;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}
/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_fl_payload *nfp_flow;
	int err;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
	if (!nfp_flow)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

err_free_flow:
	/* Reached on both success and error: the flow is always removed
	 * from the software table and freed.
	 */
	hash_del_rcu(&nfp_flow->link);
	port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app: Pointer to the APP handle
 * @flow: TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
{
	struct nfp_fl_payload *nfp_flow;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
	if (!nfp_flow)
		return -EINVAL;

	spin_lock_bh(&nfp_flow->lock);
	tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
			      nfp_flow->stats.pkts, nfp_flow->stats.used);

	/* Counters are cleared on read so each update to TC reports only
	 * the delta since the previous read.
	 */
	nfp_flow->stats.pkts = 0;
	nfp_flow->stats.bytes = 0;
	spin_unlock_bh(&nfp_flow->lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower, bool egress)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, flower);
	}

	return -EOPNOTSUPP;
}

int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, false);
	default:
		return -EOPNOTSUPP;
	}
}
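/* Bind or unbind the flower classifier callback on the TC block attached
 * to the representor netdev; only clsact ingress binding is supported.
 */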
static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}