/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#define BNXT_FID_INVALID	0xffff
#define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))

#define is_vlan_pcp_wildcarded(vlan_tci_mask)	\
	((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
#define is_vlan_pcp_exactmatch(vlan_tci_mask)	\
	((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
#define is_vlan_pcp_zero(vlan_tci)	\
	((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
#define is_vid_exactmatch(vlan_tci_mask)	\
	((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)

/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
	struct bnxt *bp;

	/* check if dev belongs to the same switch */
	if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
		netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
			    dev->ifindex);
		return BNXT_FID_INVALID;
	}

	/* Is dev a VF-rep? */
	if (bnxt_dev_is_vf_rep(dev))
		return bnxt_vf_rep_get_fid(dev);

	bp = netdev_priv(dev);
	return bp->pf.fw_fid;
}
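
/* Parse a TC mirred (redirect) action: record the destination netdev so
 * that its dst_fid can be resolved once all actions have been parsed.
 */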
static int bnxt_tc_parse_redir(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	struct net_device *dev = tcf_mirred_dev(tc_act);

	if (!dev) {
		netdev_info(bp->dev, "no dev in mirred action");
		return -EINVAL;
	}

	actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
	actions->dst_dev = dev;
	return 0;
}

static void bnxt_tc_parse_vlan(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
	}
}

static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
				    struct bnxt_tc_actions *actions,
				    const struct tc_action *tc_act)
{
	struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
	struct ip_tunnel_key *tun_key = &tun_info->key;

	if (ip_tunnel_info_af(tun_info) != AF_INET) {
		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
		return -EOPNOTSUPP;
	}

	actions->tun_encap_key = *tun_key;
	actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
	return 0;
}

static int bnxt_tc_parse_actions(struct bnxt *bp,
				 struct bnxt_tc_actions *actions,
				 struct tcf_exts *tc_exts)
{
	const struct tc_action *tc_act;
	int i, rc;

	if (!tcf_exts_has_actions(tc_exts)) {
		netdev_info(bp->dev, "no actions");
		return -EINVAL;
	}

	tcf_exts_for_each_action(i, tc_act, tc_exts) {
		/* Drop action */
		if (is_tcf_gact_shot(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
			return 0; /* don't bother with other actions */
		}

		/* Redirect action */
		if (is_tcf_mirred_egress_redirect(tc_act)) {
			rc = bnxt_tc_parse_redir(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Push/pop VLAN */
		if (is_tcf_vlan(tc_act)) {
			bnxt_tc_parse_vlan(bp, actions, tc_act);
			continue;
		}

		/* Tunnel encap */
		if (is_tcf_tunnel_set(tc_act)) {
			rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Tunnel decap */
		if (is_tcf_tunnel_release(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
			continue;
		}
	}

	if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
		if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
			/* dst_fid is PF's fid */
			actions->dst_fid = bp->pf.fw_fid;
		} else {
			/* find the FID from dst_dev */
			actions->dst_fid =
				bnxt_flow_get_dst_fid(bp, actions->dst_dev);
			if (actions->dst_fid == BNXT_FID_INVALID)
				return -EINVAL;
		}
	}

	return 0;
}

#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)
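
/* Translate the flower match (flow dissector keys/masks) into the driver's
 * bnxt_tc_flow representation (L2/L3/L4 and tunnel keys), then parse the
 * associated TC actions.
 */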
static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;
	u16 addr_type = 0;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;

		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

		flow->l2_key.inner_vlan_tci =
			cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
			cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd,
				 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
		flow->tun_key.u.ipv4.dst = key->dst;
		flow->tun_mask.u.ipv4.dst = mask->dst;
		flow->tun_key.u.ipv4.src = key->src;
		flow->tun_mask.u.ipv4.src = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
		struct flow_dissector_key_keyid *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
		flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
		flow->tun_key.tp_dst = key->dst;
		flow->tun_mask.tp_dst = mask->dst;
		flow->tun_key.tp_src = key->src;
		flow->tun_mask.tp_src = mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}

static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
	struct hwrm_cfa_flow_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
	req.flow_handle = flow_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
			    __func__, flow_handle, rc);

	if (rc)
		rc = -EIO;
	return rc;
}
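
/* Return the prefix length of an IPv6 mask, computed as the sum of the
 * prefix lengths of its four 32-bit words (only contiguous masks are
 * meaningful here, as with the IPv4 inet_mask_len() helper).
 */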
static int ipv6_mask_len(struct in6_addr *mask)
{
	int mask_len = 0, i;

	for (i = 0; i < 4; i++)
		mask_len += inet_mask_len(mask->s6_addr32[i]);

	return mask_len;
}

static bool is_wildcard(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++) {
		if (p[i] != 0)
			return false;
	}
	return true;
}

static bool is_exactmatch(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0xff)
			return false;

	return true;
}

static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
				__be16 vlan_tci)
{
	/* VLAN priority must be either exactly zero or fully wildcarded and
	 * VLAN id must be exact match.
	 */
	if (is_vid_exactmatch(vlan_tci_mask) &&
	    ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
	      is_vlan_pcp_zero(vlan_tci)) ||
	     is_vlan_pcp_wildcarded(vlan_tci_mask)))
		return true;

	return false;
}

static bool bits_set(void *key, int len)
{
	const u8 *p = key;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0)
			return true;

	return false;
}
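
/* Build and send a HWRM_CFA_FLOW_ALLOC request from the parsed flow match
 * and actions. On success the firmware-assigned flow handle is returned in
 * *flow_handle; ref_flow_handle and tunnel_handle link the flow to a
 * previously offloaded L2 context and tunnel record, respectively.
 */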
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    __le16 ref_flow_handle,
				    __le32 tunnel_handle, __le16 *flow_handle)
{
	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_tc_actions *actions = &flow->actions;
	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
	struct hwrm_cfa_flow_alloc_input req = { 0 };
	u16 flow_flags = 0, action_flags = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

	req.src_fid = cpu_to_le16(flow->src_fid);
	req.ref_flow_handle = ref_flow_handle;

	if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
	    actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
		req.tunnel_handle = tunnel_handle;
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
	}

	req.ethertype = flow->l2_key.ether_type;
	req.ip_proto = flow->l4_key.ip_proto;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
	}

	if (flow->l2_key.num_vlans > 0) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
		/* FW expects the inner_vlan_tci value to be set
		 * in outer_vlan_tci when num_vlans is 1 (which is
		 * always the case in TC.)
		 */
		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
	}

	/* If all IP and L4 fields are wildcarded then this is an L2 flow */
	if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
	    is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
	} else {
		flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

		if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
			req.ip_dst_mask_len =
				inet_mask_len(l3_mask->ipv4.daddr.s_addr);
			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
			req.ip_src_mask_len =
				inet_mask_len(l3_mask->ipv4.saddr.s_addr);
		} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
			       sizeof(req.ip_dst));
			req.ip_dst_mask_len =
				ipv6_mask_len(&l3_mask->ipv6.daddr);
			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
			       sizeof(req.ip_src));
			req.ip_src_mask_len =
				ipv6_mask_len(&l3_mask->ipv6.saddr);
		}
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
		req.l4_src_port = flow->l4_key.ports.sport;
		req.l4_src_port_mask = flow->l4_mask.ports.sport;
		req.l4_dst_port = flow->l4_key.ports.dport;
		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
	} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
		/* l4 ports serve as type/code when ip_proto is ICMP */
		req.l4_src_port = htons(flow->l4_key.icmp.type);
		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
		req.l4_dst_port = htons(flow->l4_key.icmp.code);
		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
	}
	req.flags = cpu_to_le16(flow_flags);

	if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
	} else {
		if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
			action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
			req.dst_fid = cpu_to_le16(actions->dst_fid);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			/* Rewrite config with tpid = 0 implies vlan pop */
			req.l2_rewrite_vlan_tpid = 0;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
	}
	req.action_flags = cpu_to_le16(action_flags);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*flow_handle = resp->flow_handle;
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
		rc = -ENOSPC;
	else if (rc)
		rc = -EIO;
	return rc;
}
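
/* Program a VXLAN decap filter in firmware matching the outer tunnel
 * headers resolved for this flow (dst MAC, optional VLAN, outer IPv4
 * addresses, UDP dst port and VNI); returns the decap filter id in
 * *decap_filter_handle.
 */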
static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
				       struct bnxt_tc_flow *flow,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 ref_decap_handle,
				       __le32 *decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
	struct ip_tunnel_key *tun_key = &flow->tun_key;
	u32 enables = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

	req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
		   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
	req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
		/* tunnel_id is wrongly defined in the HSI defn. as __le32 */
		req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
		ether_addr_copy(req.dst_macaddr, l2_info->dmac);
	}
	if (l2_info->num_vlans) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
		req.t_ivlan_vid = l2_info->inner_vlan_tci;
	}

	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
	req.ethertype = htons(ETH_P_IP);

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
		req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
		req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
		req.src_ipaddr[0] = tun_key->u.ipv4.src;
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
		req.dst_port = tun_key->tp_dst;
	}

	/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
	 * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
	 */
	req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
	req.enables = cpu_to_le32(enables);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*decap_filter_handle = resp->decap_filter_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc)
		rc = -EIO;
	return rc;
}

static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
				      __le32 decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
	req.decap_filter_id = decap_filter_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

	if (rc)
		rc = -EIO;
	return rc;
}
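
/* Create a VXLAN encap record in firmware describing the outer headers to
 * prepend on transmit: outer MACs/VLAN from the resolved l2_info and outer
 * IPv4/UDP/VNI from the tunnel key. Returns the encap record id in
 * *encap_record_handle.
 */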
static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
				       struct ip_tunnel_key *encap_key,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 *encap_record_handle)
{
	struct hwrm_cfa_encap_record_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
	struct hwrm_cfa_encap_data_vxlan *encap =
			(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
			(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

	ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
	ether_addr_copy(encap->src_mac_addr, l2_info->smac);
	if (l2_info->num_vlans) {
		encap->num_vlan_tags = l2_info->num_vlans;
		encap->ovlan_tci = l2_info->inner_vlan_tci;
		encap->ovlan_tpid = l2_info->inner_vlan_tpid;
	}

	encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
	encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
	encap_ipv4->ttl = encap_key->ttl;

	encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
	encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
	encap_ipv4->protocol = IPPROTO_UDP;

	encap->dst_port = encap_key->tp_dst;
	encap->vni = tunnel_id_to_key32(encap_key->tun_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*encap_record_handle = resp->encap_record_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc)
		rc = -EIO;
	return rc;
}

static int hwrm_cfa_encap_record_free(struct bnxt *bp,
				      __le32 encap_record_handle)
{
	struct hwrm_cfa_encap_record_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
	req.encap_record_id = encap_record_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

	if (rc)
		rc = -EIO;
	return rc;
}
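
/* Drop the flow's reference on its shared L2 node; when the last reference
 * goes away, remove the node from the L2 hash table and free it.
 */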
static int bnxt_tc_put_l2_node(struct bnxt *bp,
			       struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the L2 shared flow list */
	list_del(&flow_node->l2_list_node);
	if (--l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
					    tc_info->l2_ht_params);
		if (rc)
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_remove_fast: %d",
				   __func__, rc);
		kfree_rcu(l2_node, rcu);
	}
	return 0;
}

static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
		    struct rhashtable_params ht_params,
		    struct bnxt_tc_l2_key *l2_key)
{
	struct bnxt_tc_l2_node *l2_node;
	int rc;

	l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
	if (!l2_node) {
		l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
		if (!l2_node) {
			rc = -ENOMEM;
			return NULL;
		}

		l2_node->key = *l2_key;
		rc = rhashtable_insert_fast(l2_table, &l2_node->node,
					    ht_params);
		if (rc) {
			kfree_rcu(l2_node, rcu);
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_insert_fast: %d",
				   __func__, rc);
			return NULL;
		}
		INIT_LIST_HEAD(&l2_node->common_l2_flows);
	}
	return l2_node;
}

/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
			    struct bnxt_tc_flow_node *flow_node,
			    __le16 *ref_flow_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *l2_node;

	l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
				      tc_info->l2_ht_params,
				      &flow->l2_key);
	if (!l2_node)
		return -1;

	/* If any other flow is using this l2_node, use its flow_handle
	 * as the ref_flow_handle
	 */
	if (l2_node->refcount > 0) {
		ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
						 struct bnxt_tc_flow_node,
						 l2_list_node);
		*ref_flow_handle = ref_flow_node->flow_handle;
	} else {
		*ref_flow_handle = cpu_to_le16(0xffff);
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching l2 key can use the flow_handle of this flow
	 * as their ref_flow_handle
	 */
	flow_node->l2_node = l2_node;
	list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
	l2_node->refcount++;
	return 0;
}

/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
	/* If L4 ports are specified then ip_proto must be TCP or UDP */
	if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
	    (flow->l4_key.ip_proto != IPPROTO_TCP &&
	     flow->l4_key.ip_proto != IPPROTO_UDP)) {
		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
			    flow->l4_key.ip_proto);
		return false;
	}

	/* Currently source/dest MAC cannot be partial wildcard */
	if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
	    !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
		return false;
	}
	if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
	    !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
		return false;
	}

	/* Currently VLAN fields cannot be partial wildcard */
	if (bits_set(&flow->l2_key.inner_vlan_tci,
		     sizeof(flow->l2_key.inner_vlan_tci)) &&
	    !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
				 flow->l2_key.inner_vlan_tci)) {
		netdev_info(bp->dev, "Unsupported VLAN TCI\n");
		return false;
	}
	if (bits_set(&flow->l2_key.inner_vlan_tpid,
		     sizeof(flow->l2_key.inner_vlan_tpid)) &&
	    !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
			   sizeof(flow->l2_mask.inner_vlan_tpid))) {
		netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
		return false;
	}

	/* Currently Ethertype must be set */
	if (!is_exactmatch(&flow->l2_mask.ether_type,
			   sizeof(flow->l2_mask.ether_type))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
		return false;
	}

	return true;
}

/* Returns the final refcount of the node on success
 * or a -ve error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
				   struct rhashtable *tunnel_table,
				   struct rhashtable_params *ht_params,
				   struct bnxt_tc_tunnel_node *tunnel_node)
{
	int rc;

	if (--tunnel_node->refcount == 0) {
		rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
			rc = -1;
		}
		kfree_rcu(tunnel_node, rcu);
		return rc;
	} else {
		return tunnel_node->refcount;
	}
}

/* Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
			struct rhashtable_params *ht_params,
			struct ip_tunnel_key *tun_key)
{
	struct bnxt_tc_tunnel_node *tunnel_node;
	int rc;

	tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
	if (!tunnel_node) {
		tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
		if (!tunnel_node) {
			rc = -ENOMEM;
			goto err;
		}

		tunnel_node->key = *tun_key;
		tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
		rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			kfree_rcu(tunnel_node, rcu);
			goto err;
		}
	}
	tunnel_node->refcount++;
	return tunnel_node;
err:
	netdev_info(bp->dev, "error rc=%d", rc);
	return NULL;
}

static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
					struct bnxt_tc_flow *flow,
					struct bnxt_tc_l2_key *l2_key,
					struct bnxt_tc_flow_node *flow_node,
					__le32 *ref_decap_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *decap_l2_node;

	decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
					    tc_info->decap_l2_ht_params,
					    l2_key);
	if (!decap_l2_node)
		return -1;

	/* If any other flow is using this decap_l2_node, use its decap_handle
	 * as the ref_decap_handle
	 */
	if (decap_l2_node->refcount > 0) {
		ref_flow_node =
			list_first_entry(&decap_l2_node->common_l2_flows,
					 struct bnxt_tc_flow_node,
					 decap_l2_list_node);
		*ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
	} else {
		*ref_decap_handle = INVALID_TUNNEL_HANDLE;
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching decap l2 key can use the decap_filter_handle of
	 * this flow as their ref_decap_handle
	 */
	flow_node->decap_l2_node = decap_l2_node;
	list_add(&flow_node->decap_l2_list_node,
		 &decap_l2_node->common_l2_flows);
	decap_l2_node->refcount++;
	return 0;
}

static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
				      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the decap L2 sharing flow list */
	list_del(&flow_node->decap_l2_list_node);
	if (--decap_l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
					    &decap_l2_node->node,
					    tc_info->decap_l2_ht_params);
		if (rc)
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
		kfree_rcu(decap_l2_node, rcu);
	}
}
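
/* Release this flow's references on the decap L2 node and the decap tunnel
 * node; if the tunnel node's refcount drops to zero, free the firmware
 * decap filter as well.
 */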
static void bnxt_tc_put_decap_handle(struct bnxt *bp,
				     struct bnxt_tc_flow_node *flow_node)
{
	__le32 decap_handle = flow_node->decap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	if (flow_node->decap_l2_node)
		bnxt_tc_put_decap_l2_node(bp, flow_node);

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				     &tc_info->decap_ht_params,
				     flow_node->decap_node);
	if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_decap_filter_free(bp, decap_handle);
}

static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
				       struct ip_tunnel_key *tun_key,
				       struct bnxt_tc_l2_key *l2_info)
{
#ifdef CONFIG_INET
	struct net_device *real_dst_dev = bp->dev;
	struct flowi4 flow = { {0} };
	struct net_device *dst_dev;
	struct neighbour *nbr;
	struct rtable *rt;
	int rc;

	flow.flowi4_proto = IPPROTO_UDP;
	flow.fl4_dport = tun_key->tp_dst;
	flow.daddr = tun_key->u.ipv4.dst;

	rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
	if (IS_ERR(rt)) {
		netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
		return -EOPNOTSUPP;
	}

	/* The route must either point to the real_dst_dev or a dst_dev that
	 * uses the real_dst_dev.
	 */
	dst_dev = rt->dst.dev;
	if (is_vlan_dev(dst_dev)) {
#if IS_ENABLED(CONFIG_VLAN_8021Q)
		struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);

		if (vlan->real_dev != real_dst_dev) {
			netdev_info(bp->dev,
				    "dst_dev(%s) doesn't use PF-if(%s)",
				    netdev_name(dst_dev),
				    netdev_name(real_dst_dev));
			rc = -EOPNOTSUPP;
			goto put_rt;
		}
		l2_info->inner_vlan_tci = htons(vlan->vlan_id);
		l2_info->inner_vlan_tpid = vlan->vlan_proto;
		l2_info->num_vlans = 1;
#endif
	} else if (dst_dev != real_dst_dev) {
		netdev_info(bp->dev,
			    "dst_dev(%s) for %pI4b is not PF-if(%s)",
			    netdev_name(dst_dev), &flow.daddr,
			    netdev_name(real_dst_dev));
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
	if (!nbr) {
		netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
			    &flow.daddr);
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	tun_key->u.ipv4.src = flow.saddr;
	tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
	neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
	ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
	neigh_release(nbr);
	ip_rt_put(rt);

	return 0;
put_rt:
	ip_rt_put(rt);
	return rc;
#else
	return -EOPNOTSUPP;
#endif
}
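
/* Look up (or create) the decap tunnel node for this flow. The first flow
 * using a given tunnel resolves the outer L2 headers via a route lookup and
 * programs a decap filter in firmware; later flows reuse the cached handle.
 */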
1078 */ 1079 decap_key->tp_src = 0; 1080 decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table, 1081 &tc_info->decap_ht_params, 1082 decap_key); 1083 if (!decap_node) 1084 return -ENOMEM; 1085 1086 flow_node->decap_node = decap_node; 1087 1088 if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) 1089 goto done; 1090 1091 /* Resolve the L2 fields for tunnel decap 1092 * Resolve the route for remote vtep (saddr) of the decap key 1093 * Find it's next-hop mac addrs 1094 */ 1095 tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src; 1096 tun_key.tp_dst = flow->tun_key.tp_dst; 1097 rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info); 1098 if (rc) 1099 goto put_decap; 1100 1101 decap_l2_info = &decap_node->l2_info; 1102 /* decap smac is wildcarded */ 1103 ether_addr_copy(decap_l2_info->dmac, l2_info.smac); 1104 if (l2_info.num_vlans) { 1105 decap_l2_info->num_vlans = l2_info.num_vlans; 1106 decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid; 1107 decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci; 1108 } 1109 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS; 1110 1111 /* For getting a decap_filter_handle we first need to check if 1112 * there are any other decap flows that share the same tunnel L2 1113 * key and if so, pass that flow's decap_filter_handle as the 1114 * ref_decap_handle for this flow. 1115 */ 1116 rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node, 1117 &ref_decap_handle); 1118 if (rc) 1119 goto put_decap; 1120 1121 /* Issue the hwrm cmd to allocate a decap filter handle */ 1122 rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info, 1123 ref_decap_handle, 1124 &decap_node->tunnel_handle); 1125 if (rc) 1126 goto put_decap_l2; 1127 1128 done: 1129 *decap_filter_handle = decap_node->tunnel_handle; 1130 return 0; 1131 1132 put_decap_l2: 1133 bnxt_tc_put_decap_l2_node(bp, flow_node); 1134 put_decap: 1135 bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table, 1136 &tc_info->decap_ht_params, 1137 flow_node->decap_node); 1138 return rc; 1139 } 1140 1141 static void bnxt_tc_put_encap_handle(struct bnxt *bp, 1142 struct bnxt_tc_tunnel_node *encap_node) 1143 { 1144 __le32 encap_handle = encap_node->tunnel_handle; 1145 struct bnxt_tc_info *tc_info = bp->tc_info; 1146 int rc; 1147 1148 rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table, 1149 &tc_info->encap_ht_params, encap_node); 1150 if (!rc && encap_handle != INVALID_TUNNEL_HANDLE) 1151 hwrm_cfa_encap_record_free(bp, encap_handle); 1152 } 1153 1154 /* Lookup the tunnel encap table and check if there's an encap_handle 1155 * alloc'd already. 1156 * If not, query L2 info via a route lookup and issue an encap_record_alloc 1157 * cmd to FW. 1158 */ 1159 static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, 1160 struct bnxt_tc_flow_node *flow_node, 1161 __le32 *encap_handle) 1162 { 1163 struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key; 1164 struct bnxt_tc_info *tc_info = bp->tc_info; 1165 struct bnxt_tc_tunnel_node *encap_node; 1166 int rc; 1167 1168 /* Check if there's another flow using the same tunnel encap. 
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields
	 */
	encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
					     &tc_info->encap_ht_params,
					     encap_key);
	if (!encap_node)
		return -ENOMEM;

	flow_node->encap_node = encap_node;

	if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
	if (rc)
		goto put_encap;

	/* Allocate a new tunnel encap record */
	rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
					 &encap_node->tunnel_handle);
	if (rc)
		goto put_encap;

done:
	*encap_handle = encap_node->tunnel_handle;
	return 0;

put_encap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				&tc_info->encap_ht_params, encap_node);
	return rc;
}

static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
				      struct bnxt_tc_flow *flow,
				      struct bnxt_tc_flow_node *flow_node)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		bnxt_tc_put_decap_handle(bp, flow_node);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}

static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
				     struct bnxt_tc_flow *flow,
				     struct bnxt_tc_flow_node *flow_node,
				     __le32 *tunnel_handle)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		return bnxt_tc_get_decap_handle(bp, flow, flow_node,
						tunnel_handle);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		return bnxt_tc_get_encap_handle(bp, flow, flow_node,
						tunnel_handle);
	else
		return 0;
}

static int __bnxt_tc_del_flow(struct bnxt *bp,
			      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* send HWRM cmd to free the flow-id */
	bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);

	mutex_lock(&tc_info->lock);

	/* release references to any tunnel encap/decap nodes */
	bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);

	/* release reference to l2 node */
	bnxt_tc_put_l2_node(bp, flow_node);

	mutex_unlock(&tc_info->lock);

	rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
			   __func__, rc);

	kfree_rcu(flow_node, rcu);
	return 0;
}

static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
				u16 src_fid)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		flow->src_fid = bp->pf.fw_fid;
	else
		flow->src_fid = src_fid;
}

/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_node *new_node, *old_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow *flow;
	__le32 tunnel_handle = 0;
	__le16 ref_flow_handle;
	int rc;

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node) {
		rc = -ENOMEM;
		goto done;
	}
	new_node->cookie = tc_flow_cmd->cookie;
	flow = &new_node->flow;

	rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
	if (rc)
		goto free_node;

	bnxt_tc_set_src_fid(bp, flow, src_fid);

	if (!bnxt_tc_can_offload(bp, flow)) {
		rc = -ENOSPC;
		goto free_node;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		__bnxt_tc_del_flow(bp, old_node);

	/* Check if the L2 part of the flow has been offloaded already.
	 * If so, bump up its refcnt and get its reference handle.
	 */
	mutex_lock(&tc_info->lock);
	rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
	if (rc)
		goto unlock;

	/* If the flow involves tunnel encap/decap, get tunnel_handle */
	rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
	if (rc)
		goto put_l2;

	/* send HWRM cmd to alloc the flow */
	rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
				      tunnel_handle, &new_node->flow_handle);
	if (rc)
		goto put_tunnel;

	flow->lastused = jiffies;
	spin_lock_init(&flow->stats_lock);
	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		goto hwrm_flow_free;

	mutex_unlock(&tc_info->lock);
	return 0;

hwrm_flow_free:
	bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
put_tunnel:
	bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2:
	bnxt_tc_put_l2_node(bp, new_node);
unlock:
	mutex_unlock(&tc_info->lock);
free_node:
	kfree_rcu(new_node, rcu);
done:
	netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
		   __func__, tc_flow_cmd->cookie, rc);
	return rc;
}

static int bnxt_tc_del_flow(struct bnxt *bp,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node)
		return -EINVAL;

	return __bnxt_tc_del_flow(bp, flow_node);
}
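
/* Report to the TC stack the packet/byte counts accumulated since the last
 * query (as deltas against prev_stats) along with the lastused timestamp.
 */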
static int bnxt_tc_get_flow_stats(struct bnxt *bp,
				  struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;
	struct bnxt_tc_flow *flow;
	unsigned long lastused;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node)
		return -1;

	flow = &flow_node->flow;
	curr_stats = &flow->stats;
	prev_stats = &flow->prev_stats;

	spin_lock(&flow->stats_lock);
	stats.packets = curr_stats->packets - prev_stats->packets;
	stats.bytes = curr_stats->bytes - prev_stats->bytes;
	*prev_stats = *curr_stats;
	lastused = flow->lastused;
	spin_unlock(&flow->stats_lock);

	tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
			      lastused);
	return 0;
}

static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
			     struct bnxt_tc_stats_batch stats_batch[])
{
	struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_flow_stats_input req = { 0 };
	__le16 *req_flow_handles = &req.flow_handle_0;
	int rc, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
	req.num_flows = cpu_to_le16(num_flows);
	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;

		req_flow_handles[i] = flow_node->flow_handle;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		__le64 *resp_packets = &resp->packet_0;
		__le64 *resp_bytes = &resp->byte_0;

		for (i = 0; i < num_flows; i++) {
			stats_batch[i].hw_stats.packets =
						le64_to_cpu(resp_packets[i]);
			stats_batch[i].hw_stats.bytes =
						le64_to_cpu(resp_bytes[i]);
		}
	} else {
		netdev_info(bp->dev, "error rc=%d", rc);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc)
		rc = -EIO;
	return rc;
}

/* Add val to accum while handling a possible wraparound
 * of val. Even though val is of type u64, its actual width
 * is denoted by mask and will wrap-around beyond that width.
 */
static void accumulate_val(u64 *accum, u64 val, u64 mask)
{
#define low_bits(x, mask)	((x) & (mask))
#define high_bits(x, mask)	((x) & ~(mask))
	bool wrapped = val < low_bits(*accum, mask);

	*accum = high_bits(*accum, mask) + val;
	if (wrapped)
		*accum += (mask + 1);
}
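
/* Example: with a 28-bit counter (mask == 0x0fffffff), an accumulated value
 * of 0x0ffffff0 followed by a new hardware reading of 0x10 means the
 * counter wrapped; the result becomes 0x10000010, i.e. the old value plus
 * the 0x20 increments that occurred across the wrap.
 */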

/* The HW counters' width is much less than 64 bits.
 * Handle possible wrap-around while updating the stat counters
 */
static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
				  struct bnxt_tc_flow_stats *acc_stats,
				  struct bnxt_tc_flow_stats *hw_stats)
{
	accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
	accumulate_val(&acc_stats->packets, hw_stats->packets,
		       tc_info->packets_mask);
}

static int
bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
				struct bnxt_tc_stats_batch stats_batch[])
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc, i;

	rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
	if (rc)
		return rc;

	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
		struct bnxt_tc_flow *flow = &flow_node->flow;

		spin_lock(&flow->stats_lock);
		bnxt_flow_stats_accum(tc_info, &flow->stats,
				      &stats_batch[i].hw_stats);
		if (flow->stats.packets != flow->prev_stats.packets)
			flow->lastused = jiffies;
		spin_unlock(&flow->stats_lock);
	}

	return 0;
}

static int
bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
			      struct bnxt_tc_stats_batch stats_batch[],
			      int *num_flows)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct rhashtable_iter *iter = &tc_info->iter;
	void *flow_node;
	int rc, i;

	rhashtable_walk_start(iter);

	rc = 0;
	for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
		flow_node = rhashtable_walk_next(iter);
		if (IS_ERR(flow_node)) {
			i = 0;
			if (PTR_ERR(flow_node) == -EAGAIN) {
				continue;
			} else {
				rc = PTR_ERR(flow_node);
				goto done;
			}
		}

		/* No more flows */
		if (!flow_node)
			goto done;

		stats_batch[i].flow_node = flow_node;
	}
done:
	rhashtable_walk_stop(iter);
	*num_flows = i;
	return rc;
}

void bnxt_tc_flow_stats_work(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int num_flows, rc;

	num_flows = atomic_read(&tc_info->flow_table.nelems);
	if (!num_flows)
		return;

	rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);

	for (;;) {
		rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
						   &num_flows);
		if (rc) {
			if (rc == -EAGAIN)
				continue;
			break;
		}

		if (!num_flows)
			break;

		bnxt_tc_flow_stats_batch_update(bp, num_flows,
						tc_info->stats_batch);
	}

	rhashtable_walk_exit(&tc_info->iter);
}

int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
			 struct tc_cls_flower_offload *cls_flower)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return bnxt_tc_add_flow(bp, src_fid, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return bnxt_tc_del_flow(bp, cls_flower);
	case TC_CLSFLOWER_STATS:
		return bnxt_tc_get_flow_stats(bp, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
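
/* rhashtable parameters for the flow, L2, decap-L2 and tunnel tables.
 * Flows are keyed by the TC cookie, L2 nodes by the L2 key, and tunnel
 * nodes by the ip_tunnel_key.
 */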
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_flow_node, node),
	.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
	.key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
	.key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
	.key_len = sizeof(struct ip_tunnel_key),
	.automatic_shrinking = true
};

/* convert counter width in bits to a mask */
#define mask(width)	((u64)~0 >> (64 - (width)))

int bnxt_init_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info;
	int rc;

	if (bp->hwrm_spec_code < 0x10803) {
		netdev_warn(bp->dev,
			    "Firmware does not support TC flower offload.\n");
		return -ENOTSUPP;
	}

	tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
	if (!tc_info)
		return -ENOMEM;
	mutex_init(&tc_info->lock);

	/* Counter widths are programmed by FW */
	tc_info->bytes_mask = mask(36);
	tc_info->packets_mask = mask(28);

	tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
	rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
	if (rc)
		goto free_tc_info;

	tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
	rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
	if (rc)
		goto destroy_flow_table;

	tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
	rc = rhashtable_init(&tc_info->decap_l2_table,
			     &tc_info->decap_l2_ht_params);
	if (rc)
		goto destroy_l2_table;

	tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->decap_table,
			     &tc_info->decap_ht_params);
	if (rc)
		goto destroy_decap_l2_table;

	tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->encap_table,
			     &tc_info->encap_ht_params);
	if (rc)
		goto destroy_decap_table;

	tc_info->enabled = true;
	bp->dev->hw_features |= NETIF_F_HW_TC;
	bp->dev->features |= NETIF_F_HW_TC;
	bp->tc_info = tc_info;
	return 0;

destroy_decap_table:
	rhashtable_destroy(&tc_info->decap_table);
destroy_decap_l2_table:
	rhashtable_destroy(&tc_info->decap_l2_table);
destroy_l2_table:
	rhashtable_destroy(&tc_info->l2_table);
destroy_flow_table:
	rhashtable_destroy(&tc_info->flow_table);
free_tc_info:
	kfree(tc_info);
	return rc;
}

void bnxt_shutdown_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;

	if (!bnxt_tc_flower_enabled(bp))
		return;

	rhashtable_destroy(&tc_info->flow_table);
	rhashtable_destroy(&tc_info->l2_table);
	rhashtable_destroy(&tc_info->decap_l2_table);
	rhashtable_destroy(&tc_info->decap_table);
	rhashtable_destroy(&tc_info->encap_table);
	kfree(tc_info);
	bp->tc_info = NULL;
}