// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

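/**
 * nfp_flower_xmit_flow() - Transmits a flow rule to the firmware.
 * @netdev:	netdev structure.
 * @nfp_flow:	Flow payload to transmit.
 * @mtype:	Control message type, e.g. flow add or flow delete.
 *
 * Builds a control message carrying the rule metadata followed by the
 * unmasked key, the mask and the action data:
 *
 *	+----------+-----+------+---------+
 *	| metadata | key | mask | actions |
 *	+----------+-----+------+---------+
 *
 * The firmware expects the key, mask and action lengths in long words,
 * while the rest of the driver works in bytes, so the lengths in the
 * metadata are converted for transmission and then restored.
 *
 * Return: negative value on error, 0 on success.
 */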
static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}

static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_PORTS) ||
	       dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
		return -EOPNOTSUPP;

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

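/**
 * nfp_flower_calculate_key_layers() - Calculates the match key layout.
 * @app:	Pointer to the APP handle
 * @ret_key_ls:	Key layer structure to populate.
 * @flow:	TC flower classifier offload structure.
 * @egress:	NFP netdev is the egress.
 * @tun_type:	Set to the tunnel type if the rule matches a tunnel.
 *
 * Walks the flow dissector keys used by the classifier, rejects any
 * match the firmware cannot offload and accumulates the key layers and
 * key size the hardware match requires. For example, a TCP-over-IPv4
 * rule compiles to the PORT, IPV4 and TP layers, plus the MAC layer if
 * Ethernet addresses are also matched.
 *
 * Return: negative value on error, 0 if key layers calculated
 * successfully.
 */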
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tun dissector is used then the required set must be used. */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *flow_vlan;

		flow_vlan = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_VLAN,
						      flow->mask);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    flow_vlan->vlan_priority)
			return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_enc_opts *enc_op = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);

		if (!egress)
			return -EOPNOTSUPP;

		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		if (dissector_uses_key(flow->dissector,
				       FLOW_DISSECTOR_KEY_ENC_OPTS)) {
			enc_op = skb_flow_dissector_target(flow->dissector,
							   FLOW_DISSECTOR_KEY_ENC_OPTS,
							   flow->key);
		}

		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (enc_op)
				return -EOPNOTSUPP;
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op)
				break;
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
				return -EOPNOTSUPP;
			err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
							&key_size);
			if (err)
				return err;
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non-tunnel matches offloaded to egress repr. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP,
						flow->key);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in the IPv4 key space, thus
		 * we need to ensure we include an IPv4 key layer if we have
		 * not done so already.
		 */
		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key_ctl;

		key_ctl = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_CONTROL,
						    flow->key);

		if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

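/**
 * nfp_flower_allocate_new() - Allocates a new flow payload.
 * @key_layer:	Key layer structure giving the required key size.
 * @egress:	NFP netdev is the egress.
 *
 * Allocates the unmasked key and mask buffers at the calculated key
 * size and the action buffer at the maximum supported action size.
 *
 * Return: new flow payload, or NULL on allocation failure.
 */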
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	flow_pay->ingress_offload = !egress;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @egress:	NFP netdev is the egress.
 *
 * Compiles the match and action payloads for a new flow, offloads it
 * to the firmware and adds it to the driver flow hash table.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (flow_pay) {
		/* Ignore as duplicate if it has been added by a different cb. */
		if (flow_pay->ingress_offload && egress)
			return 0;
		else
			return -EOPNOTSUPP;
	}

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer, egress);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	flow_pay->ingress_dev = egress ? NULL : netdev;

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay,
					flow_pay->ingress_dev);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_destroy_flow;

	port->tc_offload_cnt++;

	/* The flow payload is kept until the flower rule is destroyed;
	 * only the key layer description is no longer needed.
	 */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @egress:	Netdev is the egress dev.
 *
 * Removes a flow from the flow hash table and frees its match and
 * action payloads.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return egress ? 0 : -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

	/* Fall through on success: the flow is removed from the driver
	 * whether or not the delete message reached the firmware.
	 */
err_free_flow:
	port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @egress:	Netdev is the egress dev.
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;
	u32 ctx_id;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return -EINVAL;

	if (nfp_flow->ingress_offload && egress)
		return 0;

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	/* Report the accumulated stats to TC, then zero them so each
	 * request returns only the delta since the last read.
	 */
	spin_lock_bh(&priv->stats_lock);
	tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
			      priv->stats[ctx_id].pkts,
			      priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

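/**
 * nfp_flower_repr_offload() - Handles a TC flower classifier command.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flower:	TC flower classifier offload structure.
 * @egress:	NFP netdev is the egress.
 *
 * Dispatches flower rule replace, destroy and stats requests to the
 * corresponding add, delete and stats handlers. Rules with a
 * non-Ethernet protocol are rejected.
 *
 * Return: negative value on error, 0 if command handled successfully.
 */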
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower, bool egress)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower, egress);
	default:
		return -EOPNOTSUPP;
	}
}

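/**
 * nfp_flower_setup_tc_egress_cb() - TC block callback for egress offload.
 * @type:	Type of TC offload being passed, e.g. flower.
 * @type_data:	Offload structure matching the offload type.
 * @cb_priv:	Callback private data, here a pointer to the repr.
 *
 * Offloads flower classifiers whose egress device is the repr; only
 * rules on chain 0 can be offloaded.
 *
 * Return: negative value on error, 0 if command handled successfully.
 */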
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, false);
	default:
		return -EOPNOTSUPP;
	}
}

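/**
 * nfp_flower_setup_tc_block() - Binds or unbinds a TC block callback.
 * @netdev:	netdev structure.
 * @f:		TC block offload structure.
 *
 * Registers the driver's classifier callback on the repr's clsact
 * ingress block on bind, and unregisters it on unbind; other binder
 * types are rejected.
 *
 * Return: negative value on error, 0 if command handled successfully.
 */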
static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}