xref: /openbmc/linux/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c (revision dd2934a95701576203b2f61e8ded4e4a2f9183ea)
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#define BNXT_FID_INVALID			0xffff
#define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))

#define is_vlan_pcp_wildcarded(vlan_tci_mask)	\
	((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
#define is_vlan_pcp_exactmatch(vlan_tci_mask)	\
	((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
#define is_vlan_pcp_zero(vlan_tci)	\
	((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
#define is_vid_exactmatch(vlan_tci_mask)	\
	((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
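
/* Example: VLAN_TCI(5, 1) builds TCI 0x2005 (PCP 1, VID 5), since
 * VLAN_PRIO_SHIFT is 13. A mask of htons(0x0fff) wildcards the PCP bits
 * (is_vlan_pcp_wildcarded() is true) while matching the VID exactly
 * (is_vid_exactmatch() is true).
 */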

/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
	struct bnxt *bp;

	/* check if dev belongs to the same switch */
	if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
		netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
			    dev->ifindex);
		return BNXT_FID_INVALID;
	}

	/* Is dev a VF-rep? */
	if (bnxt_dev_is_vf_rep(dev))
		return bnxt_vf_rep_get_fid(dev);

	bp = netdev_priv(dev);
	return bp->pf.fw_fid;
}
61 
62 static int bnxt_tc_parse_redir(struct bnxt *bp,
63 			       struct bnxt_tc_actions *actions,
64 			       const struct tc_action *tc_act)
65 {
66 	struct net_device *dev = tcf_mirred_dev(tc_act);
67 
68 	if (!dev) {
69 		netdev_info(bp->dev, "no dev in mirred action");
70 		return -EINVAL;
71 	}
72 
73 	actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
74 	actions->dst_dev = dev;
75 	return 0;
76 }
77 
78 static void bnxt_tc_parse_vlan(struct bnxt *bp,
79 			       struct bnxt_tc_actions *actions,
80 			       const struct tc_action *tc_act)
81 {
82 	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
83 		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
84 	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
85 		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
86 		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
87 		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
88 	}
89 }
90 
91 static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
92 				    struct bnxt_tc_actions *actions,
93 				    const struct tc_action *tc_act)
94 {
95 	struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
96 	struct ip_tunnel_key *tun_key = &tun_info->key;
97 
98 	if (ip_tunnel_info_af(tun_info) != AF_INET) {
99 		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
100 		return -EOPNOTSUPP;
101 	}
102 
103 	actions->tun_encap_key = *tun_key;
104 	actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
105 	return 0;
106 }
107 
108 static int bnxt_tc_parse_actions(struct bnxt *bp,
109 				 struct bnxt_tc_actions *actions,
110 				 struct tcf_exts *tc_exts)
111 {
112 	const struct tc_action *tc_act;
113 	int i, rc;
114 
115 	if (!tcf_exts_has_actions(tc_exts)) {
116 		netdev_info(bp->dev, "no actions");
117 		return -EINVAL;
118 	}
119 
120 	tcf_exts_for_each_action(i, tc_act, tc_exts) {
121 		/* Drop action */
122 		if (is_tcf_gact_shot(tc_act)) {
123 			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
124 			return 0; /* don't bother with other actions */
125 		}
126 
127 		/* Redirect action */
128 		if (is_tcf_mirred_egress_redirect(tc_act)) {
129 			rc = bnxt_tc_parse_redir(bp, actions, tc_act);
130 			if (rc)
131 				return rc;
132 			continue;
133 		}
134 
135 		/* Push/pop VLAN */
136 		if (is_tcf_vlan(tc_act)) {
137 			bnxt_tc_parse_vlan(bp, actions, tc_act);
138 			continue;
139 		}
140 
141 		/* Tunnel encap */
142 		if (is_tcf_tunnel_set(tc_act)) {
143 			rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
144 			if (rc)
145 				return rc;
146 			continue;
147 		}
148 
149 		/* Tunnel decap */
150 		if (is_tcf_tunnel_release(tc_act)) {
151 			actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
152 			continue;
153 		}
154 	}
155 
156 	if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
157 		if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
158 			/* dst_fid is PF's fid */
159 			actions->dst_fid = bp->pf.fw_fid;
160 		} else {
161 			/* find the FID from dst_dev */
162 			actions->dst_fid =
163 				bnxt_flow_get_dst_fid(bp, actions->dst_dev);
164 			if (actions->dst_fid == BNXT_FID_INVALID)
165 				return -EINVAL;
166 		}
167 	}
168 
169 	return 0;
170 }
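
/* For illustration only (the interface names are hypothetical), flower
 * rules such as the following map onto the action flags parsed above:
 *
 *   tc filter add dev vfrep0 protocol ip parent ffff: flower \
 *       ip_proto tcp dst_port 80 action drop
 *   tc filter add dev vfrep0 protocol ip parent ffff: flower \
 *       action mirred egress redirect dev pf0
 *
 * The first sets BNXT_TC_ACTION_FLAG_DROP; the second sets
 * BNXT_TC_ACTION_FLAG_FWD with dst_dev pointing at pf0.
 */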

#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)
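
/* Flower passes each dissector field as a key/mask pair: bits set in the
 * mask select the bits of the key that must match, while an all-zero mask
 * wildcards the field entirely.
 */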

static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;

		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

		flow->l2_key.inner_vlan_tci =
		   cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
		   cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
				GET_MASK(tc_flow_cmd,
					 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
		flow->tun_key.u.ipv4.dst = key->dst;
		flow->tun_mask.u.ipv4.dst = mask->dst;
		flow->tun_key.u.ipv4.src = key->src;
		flow->tun_mask.u.ipv4.src = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
		struct flow_dissector_key_keyid *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
		flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
		flow->tun_key.tp_dst = key->dst;
		flow->tun_mask.tp_dst = mask->dst;
		flow->tun_key.tp_src = key->src;
		flow->tun_mask.tp_src = mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}

static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
	struct hwrm_cfa_flow_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
	req.flow_handle = flow_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
			    __func__, flow_handle, rc);

	if (rc)
		rc = -EIO;
	return rc;
}

static int ipv6_mask_len(struct in6_addr *mask)
{
	int mask_len = 0, i;

	for (i = 0; i < 4; i++)
		mask_len += inet_mask_len(mask->s6_addr32[i]);

	return mask_len;
}
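
/* Example: the mask ffff:ffff:ffff:ffff:: yields 64 (a /64 prefix), since
 * inet_mask_len() returns 32 for each of the first two 32-bit words and 0
 * for the remaining two.
 */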

static bool is_wildcard(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++) {
		if (p[i] != 0)
			return false;
	}
	return true;
}

static bool is_exactmatch(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0xff)
			return false;

	return true;
}
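
/* Example: for a 6-byte MAC mask, 00:00:00:00:00:00 is a wildcard,
 * ff:ff:ff:ff:ff:ff is an exact match, and anything in between (a partial
 * wildcard) is neither.
 */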

static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
				__be16 vlan_tci)
{
	/* VLAN priority must either match exactly zero or be fully
	 * wildcarded, and the VLAN ID must be an exact match.
	 */
	if (is_vid_exactmatch(vlan_tci_mask) &&
	    ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
	      is_vlan_pcp_zero(vlan_tci)) ||
	     is_vlan_pcp_wildcarded(vlan_tci_mask)))
		return true;

	return false;
}
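
/* Examples (host order): mask 0x0fff is allowed (PCP wildcarded, VID exact);
 * mask 0xffff with a zero-PCP TCI is allowed; mask 0xe0ff is rejected
 * because the VID match is only partial.
 */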

static bool bits_set(void *key, int len)
{
	const u8 *p = key;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0)
			return true;

	return false;
}

static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    __le16 ref_flow_handle,
				    __le32 tunnel_handle, __le16 *flow_handle)
{
	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_tc_actions *actions = &flow->actions;
	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
	struct hwrm_cfa_flow_alloc_input req = { 0 };
	u16 flow_flags = 0, action_flags = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

	req.src_fid = cpu_to_le16(flow->src_fid);
	req.ref_flow_handle = ref_flow_handle;

	if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
	    actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
		req.tunnel_handle = tunnel_handle;
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
	}

	req.ethertype = flow->l2_key.ether_type;
	req.ip_proto = flow->l4_key.ip_proto;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
	}

	if (flow->l2_key.num_vlans > 0) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
		/* FW expects the inner_vlan_tci value to be set
		 * in outer_vlan_tci when num_vlans is 1 (which is
		 * always the case in TC).
		 */
		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
	}

	/* If all IP and L4 fields are wildcarded then this is an L2 flow */
	if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
	    is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
	} else {
		flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

		if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
			req.ip_dst_mask_len =
				inet_mask_len(l3_mask->ipv4.daddr.s_addr);
			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
			req.ip_src_mask_len =
				inet_mask_len(l3_mask->ipv4.saddr.s_addr);
		} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
			       sizeof(req.ip_dst));
			req.ip_dst_mask_len =
					ipv6_mask_len(&l3_mask->ipv6.daddr);
			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
			       sizeof(req.ip_src));
			req.ip_src_mask_len =
					ipv6_mask_len(&l3_mask->ipv6.saddr);
		}
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
		req.l4_src_port = flow->l4_key.ports.sport;
		req.l4_src_port_mask = flow->l4_mask.ports.sport;
		req.l4_dst_port = flow->l4_key.ports.dport;
		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
	} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
		/* l4 ports serve as type/code when ip_proto is ICMP */
		req.l4_src_port = htons(flow->l4_key.icmp.type);
		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
		req.l4_dst_port = htons(flow->l4_key.icmp.code);
		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
	}
	req.flags = cpu_to_le16(flow_flags);

	if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
	} else {
		if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
			action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
			req.dst_fid = cpu_to_le16(actions->dst_fid);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			/* Rewrite config with tpid = 0 implies VLAN pop */
			req.l2_rewrite_vlan_tpid = 0;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
	}
	req.action_flags = cpu_to_le16(action_flags);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*flow_handle = resp->flow_handle;
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
		rc = -ENOSPC;
	else if (rc)
		rc = -EIO;
	return rc;
}

static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
				       struct bnxt_tc_flow *flow,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 ref_decap_handle,
				       __le32 *decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
	struct ip_tunnel_key *tun_key = &flow->tun_key;
	u32 enables = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

	req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
		   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
	req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
		/* tunnel_id is wrongly defined in the HSI definition as __le32 */
		req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
		ether_addr_copy(req.dst_macaddr, l2_info->dmac);
	}
	if (l2_info->num_vlans) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
		req.t_ivlan_vid = l2_info->inner_vlan_tci;
	}

	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
	req.ethertype = htons(ETH_P_IP);

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
		req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
		req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
		req.src_ipaddr[0] = tun_key->u.ipv4.src;
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
		req.dst_port = tun_key->tp_dst;
	}

	/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
	 * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
	 */
	req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
	req.enables = cpu_to_le32(enables);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*decap_filter_handle = resp->decap_filter_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc)
		rc = -EIO;
	return rc;
}

static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
				      __le32 decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
	req.decap_filter_id = decap_filter_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

	if (rc)
		rc = -EIO;
	return rc;
}

static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
				       struct ip_tunnel_key *encap_key,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 *encap_record_handle)
{
	struct hwrm_cfa_encap_record_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
	struct hwrm_cfa_encap_data_vxlan *encap =
			(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
				(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

	ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
	ether_addr_copy(encap->src_mac_addr, l2_info->smac);
	if (l2_info->num_vlans) {
		encap->num_vlan_tags = l2_info->num_vlans;
		encap->ovlan_tci = l2_info->inner_vlan_tci;
		encap->ovlan_tpid = l2_info->inner_vlan_tpid;
	}

	encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
	encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
	encap_ipv4->ttl = encap_key->ttl;

	encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
	encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
	encap_ipv4->protocol = IPPROTO_UDP;

	encap->dst_port = encap_key->tp_dst;
	encap->vni = tunnel_id_to_key32(encap_key->tun_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*encap_record_handle = resp->encap_record_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc)
		rc = -EIO;
	return rc;
}

static int hwrm_cfa_encap_record_free(struct bnxt *bp,
				      __le32 encap_record_handle)
{
	struct hwrm_cfa_encap_record_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
	req.encap_record_id = encap_record_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

	if (rc)
		rc = -EIO;
	return rc;
}

static int bnxt_tc_put_l2_node(struct bnxt *bp,
			       struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the L2 shared flow list */
	list_del(&flow_node->l2_list_node);
	if (--l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
					    tc_info->l2_ht_params);
		if (rc)
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_remove_fast: %d",
				   __func__, rc);
		kfree_rcu(l2_node, rcu);
	}
	return 0;
}

static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
		    struct rhashtable_params ht_params,
		    struct bnxt_tc_l2_key *l2_key)
{
	struct bnxt_tc_l2_node *l2_node;
	int rc;

	l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
	if (!l2_node) {
		l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
		if (!l2_node) {
			rc = -ENOMEM;
			return NULL;
		}

		l2_node->key = *l2_key;
		rc = rhashtable_insert_fast(l2_table, &l2_node->node,
					    ht_params);
		if (rc) {
			kfree_rcu(l2_node, rcu);
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_insert_fast: %d",
				   __func__, rc);
			return NULL;
		}
		INIT_LIST_HEAD(&l2_node->common_l2_flows);
	}
	return l2_node;
}

/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
			    struct bnxt_tc_flow_node *flow_node,
			    __le16 *ref_flow_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *l2_node;

	l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
				      tc_info->l2_ht_params,
				      &flow->l2_key);
	if (!l2_node)
		return -1;

	/* If any other flow is using this l2_node, use its flow_handle
	 * as the ref_flow_handle
	 */
	if (l2_node->refcount > 0) {
		ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
						 struct bnxt_tc_flow_node,
						 l2_list_node);
		*ref_flow_handle = ref_flow_node->flow_handle;
	} else {
		*ref_flow_handle = cpu_to_le16(0xffff);
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching l2 key can use the flow_handle of this flow
	 * as their ref_flow_handle
	 */
	flow_node->l2_node = l2_node;
	list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
	l2_node->refcount++;
	return 0;
}

/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
	/* If L4 ports are specified then ip_proto must be TCP or UDP */
	if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
	    (flow->l4_key.ip_proto != IPPROTO_TCP &&
	     flow->l4_key.ip_proto != IPPROTO_UDP)) {
		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
			    flow->l4_key.ip_proto);
		return false;
	}

	/* Currently source/dest MAC cannot be a partial wildcard */
	if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
	    !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
		return false;
	}
	if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
	    !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
		return false;
	}

	/* Currently VLAN fields cannot be a partial wildcard */
	if (bits_set(&flow->l2_key.inner_vlan_tci,
		     sizeof(flow->l2_key.inner_vlan_tci)) &&
	    !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
				 flow->l2_key.inner_vlan_tci)) {
		netdev_info(bp->dev, "Unsupported VLAN TCI\n");
		return false;
	}
	if (bits_set(&flow->l2_key.inner_vlan_tpid,
		     sizeof(flow->l2_key.inner_vlan_tpid)) &&
	    !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
			   sizeof(flow->l2_mask.inner_vlan_tpid))) {
		netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
		return false;
	}

	/* Currently Ethertype must be set */
	if (!is_exactmatch(&flow->l2_mask.ether_type,
			   sizeof(flow->l2_mask.ether_type))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
		return false;
	}

	return true;
}

/* Returns the final refcount of the node on success
 * or a -ve error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
				   struct rhashtable *tunnel_table,
				   struct rhashtable_params *ht_params,
				   struct bnxt_tc_tunnel_node *tunnel_node)
{
	int rc;

	if (--tunnel_node->refcount == 0) {
		rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
			rc = -1;
		}
		kfree_rcu(tunnel_node, rcu);
		return rc;
	} else {
		return tunnel_node->refcount;
	}
}

/* Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
			struct rhashtable_params *ht_params,
			struct ip_tunnel_key *tun_key)
{
	struct bnxt_tc_tunnel_node *tunnel_node;
	int rc;

	tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
	if (!tunnel_node) {
		tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
		if (!tunnel_node) {
			rc = -ENOMEM;
			goto err;
		}

		tunnel_node->key = *tun_key;
		tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
		rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			kfree_rcu(tunnel_node, rcu);
			goto err;
		}
	}
	tunnel_node->refcount++;
	return tunnel_node;
err:
	netdev_info(bp->dev, "error rc=%d", rc);
	return NULL;
}

static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
					struct bnxt_tc_flow *flow,
					struct bnxt_tc_l2_key *l2_key,
					struct bnxt_tc_flow_node *flow_node,
					__le32 *ref_decap_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *decap_l2_node;

	decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
					    tc_info->decap_l2_ht_params,
					    l2_key);
	if (!decap_l2_node)
		return -1;

	/* If any other flow is using this decap_l2_node, use its decap_handle
	 * as the ref_decap_handle
	 */
	if (decap_l2_node->refcount > 0) {
		ref_flow_node =
			list_first_entry(&decap_l2_node->common_l2_flows,
					 struct bnxt_tc_flow_node,
					 decap_l2_list_node);
		*ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
	} else {
		*ref_decap_handle = INVALID_TUNNEL_HANDLE;
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching decap l2 key can use the decap_filter_handle of
	 * this flow as their ref_decap_handle
	 */
	flow_node->decap_l2_node = decap_l2_node;
	list_add(&flow_node->decap_l2_list_node,
		 &decap_l2_node->common_l2_flows);
	decap_l2_node->refcount++;
	return 0;
}

static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
				      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the decap L2 shared flow list */
	list_del(&flow_node->decap_l2_list_node);
	if (--decap_l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
					    &decap_l2_node->node,
					    tc_info->decap_l2_ht_params);
		if (rc)
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
		kfree_rcu(decap_l2_node, rcu);
	}
}

static void bnxt_tc_put_decap_handle(struct bnxt *bp,
				     struct bnxt_tc_flow_node *flow_node)
{
	__le32 decap_handle = flow_node->decap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	if (flow_node->decap_l2_node)
		bnxt_tc_put_decap_l2_node(bp, flow_node);

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				     &tc_info->decap_ht_params,
				     flow_node->decap_node);
	if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_decap_filter_free(bp, decap_handle);
}

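/* Resolve the L2 headers needed for a tunnel: do a route lookup on the
 * tunnel dest IP to find the egress netdev (which must be the PF interface,
 * possibly via a VLAN upper device), then a neighbour lookup on that route
 * for the next-hop MAC. On success, l2_info holds the dmac/smac (and VLAN
 * tag, if any) and tun_key's saddr/ttl are filled in from the route.
 */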
static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
				       struct ip_tunnel_key *tun_key,
				       struct bnxt_tc_l2_key *l2_info)
{
#ifdef CONFIG_INET
	struct net_device *real_dst_dev = bp->dev;
	struct flowi4 flow = { {0} };
	struct net_device *dst_dev;
	struct neighbour *nbr;
	struct rtable *rt;
	int rc;

	flow.flowi4_proto = IPPROTO_UDP;
	flow.fl4_dport = tun_key->tp_dst;
	flow.daddr = tun_key->u.ipv4.dst;

	rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
	if (IS_ERR(rt)) {
		netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
		return -EOPNOTSUPP;
	}

	/* The route must either point to the real_dst_dev or a dst_dev that
	 * uses the real_dst_dev.
	 */
	dst_dev = rt->dst.dev;
	if (is_vlan_dev(dst_dev)) {
#if IS_ENABLED(CONFIG_VLAN_8021Q)
		struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);

		if (vlan->real_dev != real_dst_dev) {
			netdev_info(bp->dev,
				    "dst_dev(%s) doesn't use PF-if(%s)",
				    netdev_name(dst_dev),
				    netdev_name(real_dst_dev));
			rc = -EOPNOTSUPP;
			goto put_rt;
		}
		l2_info->inner_vlan_tci = htons(vlan->vlan_id);
		l2_info->inner_vlan_tpid = vlan->vlan_proto;
		l2_info->num_vlans = 1;
#endif
	} else if (dst_dev != real_dst_dev) {
		netdev_info(bp->dev,
			    "dst_dev(%s) for %pI4b is not PF-if(%s)",
			    netdev_name(dst_dev), &flow.daddr,
			    netdev_name(real_dst_dev));
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
	if (!nbr) {
		netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
			    &flow.daddr);
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	tun_key->u.ipv4.src = flow.saddr;
	tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
	neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
	ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
	neigh_release(nbr);
	ip_rt_put(rt);

	return 0;
put_rt:
	ip_rt_put(rt);
	return rc;
#else
	return -EOPNOTSUPP;
#endif
}

static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *decap_filter_handle)
{
	struct ip_tunnel_key *decap_key = &flow->tun_key;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_l2_key l2_info = { {0} };
	struct bnxt_tc_tunnel_node *decap_node;
	struct ip_tunnel_key tun_key = { 0 };
	struct bnxt_tc_l2_key *decap_l2_info;
	__le32 ref_decap_handle;
	int rc;

	/* Check if there's another flow using the same tunnel decap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields. Ignore src_port in the tunnel_key,
	 * since it is not required for decap filters.
	 */
	decap_key->tp_src = 0;
	decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
					     &tc_info->decap_ht_params,
					     decap_key);
	if (!decap_node)
		return -ENOMEM;

	flow_node->decap_node = decap_node;

	if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	/* Resolve the L2 fields for tunnel decap:
	 * resolve the route for the remote vtep (saddr) of the decap key
	 * and find its next-hop MAC address.
	 */
	tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
	tun_key.tp_dst = flow->tun_key.tp_dst;
	rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
	if (rc)
		goto put_decap;

	decap_l2_info = &decap_node->l2_info;
	/* decap smac is wildcarded */
	ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
	if (l2_info.num_vlans) {
		decap_l2_info->num_vlans = l2_info.num_vlans;
		decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
		decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
	}
	flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;

	/* For getting a decap_filter_handle we first need to check if
	 * there are any other decap flows that share the same tunnel L2
	 * key and if so, pass that flow's decap_filter_handle as the
	 * ref_decap_handle for this flow.
	 */
	rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
					  &ref_decap_handle);
	if (rc)
		goto put_decap;

	/* Issue the hwrm cmd to allocate a decap filter handle */
	rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
					 ref_decap_handle,
					 &decap_node->tunnel_handle);
	if (rc)
		goto put_decap_l2;

done:
	*decap_filter_handle = decap_node->tunnel_handle;
	return 0;

put_decap_l2:
	bnxt_tc_put_decap_l2_node(bp, flow_node);
put_decap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				&tc_info->decap_ht_params,
				flow_node->decap_node);
	return rc;
}

static void bnxt_tc_put_encap_handle(struct bnxt *bp,
				     struct bnxt_tc_tunnel_node *encap_node)
{
	__le32 encap_handle = encap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				     &tc_info->encap_ht_params, encap_node);
	if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_encap_record_free(bp, encap_handle);
}

/* Lookup the tunnel encap table and check if there's an encap_handle
 * alloc'd already.
 * If not, query L2 info via a route lookup and issue an encap_record_alloc
 * cmd to FW.
 */
static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *encap_handle)
{
	struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_tunnel_node *encap_node;
	int rc;

	/* Check if there's another flow using the same tunnel encap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields
	 */
	encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
					     &tc_info->encap_ht_params,
					     encap_key);
	if (!encap_node)
		return -ENOMEM;

	flow_node->encap_node = encap_node;

	if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
	if (rc)
		goto put_encap;

	/* Allocate a new tunnel encap record */
	rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
					 &encap_node->tunnel_handle);
	if (rc)
		goto put_encap;

done:
	*encap_handle = encap_node->tunnel_handle;
	return 0;

put_encap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				&tc_info->encap_ht_params, encap_node);
	return rc;
}

static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
				      struct bnxt_tc_flow *flow,
				      struct bnxt_tc_flow_node *flow_node)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		bnxt_tc_put_decap_handle(bp, flow_node);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}

static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
				     struct bnxt_tc_flow *flow,
				     struct bnxt_tc_flow_node *flow_node,
				     __le32 *tunnel_handle)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		return bnxt_tc_get_decap_handle(bp, flow, flow_node,
						tunnel_handle);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		return bnxt_tc_get_encap_handle(bp, flow, flow_node,
						tunnel_handle);
	else
		return 0;
}

static int __bnxt_tc_del_flow(struct bnxt *bp,
			      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* send HWRM cmd to free the flow-id */
	bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);

	mutex_lock(&tc_info->lock);

	/* release references to any tunnel encap/decap nodes */
	bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);

	/* release reference to l2 node */
	bnxt_tc_put_l2_node(bp, flow_node);

	mutex_unlock(&tc_info->lock);

	rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
			   __func__, rc);

	kfree_rcu(flow_node, rcu);
	return 0;
}

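/* For decap flows the packet is received on the PF uplink (the tunnel
 * endpoint), so the PF's fid is used as the flow's src_fid; otherwise
 * the fid of the ingress port is used as-is.
 */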
static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
				u16 src_fid)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		flow->src_fid = bp->pf.fw_fid;
	else
		flow->src_fid = src_fid;
}

/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_node *new_node, *old_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow *flow;
	__le32 tunnel_handle = 0;
	__le16 ref_flow_handle;
	int rc;

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node) {
		rc = -ENOMEM;
		goto done;
	}
	new_node->cookie = tc_flow_cmd->cookie;
	flow = &new_node->flow;

	rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
	if (rc)
		goto free_node;

	bnxt_tc_set_src_fid(bp, flow, src_fid);

	if (!bnxt_tc_can_offload(bp, flow)) {
		rc = -ENOSPC;
		goto free_node;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		__bnxt_tc_del_flow(bp, old_node);

	/* Check if the L2 part of the flow has been offloaded already.
	 * If so, bump up its refcnt and get its reference handle.
	 */
	mutex_lock(&tc_info->lock);
	rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
	if (rc)
		goto unlock;

	/* If the flow involves tunnel encap/decap, get tunnel_handle */
	rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
	if (rc)
		goto put_l2;

	/* send HWRM cmd to alloc the flow */
	rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
				      tunnel_handle, &new_node->flow_handle);
	if (rc)
		goto put_tunnel;

	flow->lastused = jiffies;
	spin_lock_init(&flow->stats_lock);
	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		goto hwrm_flow_free;

	mutex_unlock(&tc_info->lock);
	return 0;

hwrm_flow_free:
	bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
put_tunnel:
	bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2:
	bnxt_tc_put_l2_node(bp, new_node);
unlock:
	mutex_unlock(&tc_info->lock);
free_node:
	kfree_rcu(new_node, rcu);
done:
	netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
		   __func__, tc_flow_cmd->cookie, rc);
	return rc;
}

static int bnxt_tc_del_flow(struct bnxt *bp,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node)
		return -EINVAL;

	return __bnxt_tc_del_flow(bp, flow_node);
}

static int bnxt_tc_get_flow_stats(struct bnxt *bp,
				  struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;
	struct bnxt_tc_flow *flow;
	unsigned long lastused;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node)
		return -1;

	flow = &flow_node->flow;
	curr_stats = &flow->stats;
	prev_stats = &flow->prev_stats;

	spin_lock(&flow->stats_lock);
	stats.packets = curr_stats->packets - prev_stats->packets;
	stats.bytes = curr_stats->bytes - prev_stats->bytes;
	*prev_stats = *curr_stats;
	lastused = flow->lastused;
	spin_unlock(&flow->stats_lock);

	tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
			      lastused);
	return 0;
}

static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
			     struct bnxt_tc_stats_batch stats_batch[])
{
	struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_flow_stats_input req = { 0 };
	__le16 *req_flow_handles = &req.flow_handle_0;
	int rc, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
	req.num_flows = cpu_to_le16(num_flows);
	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;

		req_flow_handles[i] = flow_node->flow_handle;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		__le64 *resp_packets = &resp->packet_0;
		__le64 *resp_bytes = &resp->byte_0;

		for (i = 0; i < num_flows; i++) {
			stats_batch[i].hw_stats.packets =
						le64_to_cpu(resp_packets[i]);
			stats_batch[i].hw_stats.bytes =
						le64_to_cpu(resp_bytes[i]);
		}
	} else {
		netdev_info(bp->dev, "error rc=%d", rc);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc)
		rc = -EIO;
	return rc;
}

/* Add val to accum while handling a possible wraparound
 * of val. Even though val is of type u64, its actual width
 * is denoted by mask and it will wrap around beyond that width.
 */
static void accumulate_val(u64 *accum, u64 val, u64 mask)
{
#define low_bits(x, mask)		((x) & (mask))
#define high_bits(x, mask)		((x) & ~(mask))
	bool wrapped = val < low_bits(*accum, mask);

	*accum = high_bits(*accum, mask) + val;
	if (wrapped)
		*accum += (mask + 1);
}
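
/* Worked example with a 28-bit counter (mask 0x0fffffff): if *accum is
 * 0x0ffffff0 and the HW now reports 0x10, the counter has wrapped, so accum
 * becomes 0x10 + 0x10000000 = 0x10000010, i.e. it advanced by 0x20.
 */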

/* The HW counters' width is much less than 64 bits.
 * Handle possible wrap-around while updating the stat counters
 */
static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
				  struct bnxt_tc_flow_stats *acc_stats,
				  struct bnxt_tc_flow_stats *hw_stats)
{
	accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
	accumulate_val(&acc_stats->packets, hw_stats->packets,
		       tc_info->packets_mask);
}

static int
bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
				struct bnxt_tc_stats_batch stats_batch[])
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc, i;

	rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
	if (rc)
		return rc;

	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
		struct bnxt_tc_flow *flow = &flow_node->flow;

		spin_lock(&flow->stats_lock);
		bnxt_flow_stats_accum(tc_info, &flow->stats,
				      &stats_batch[i].hw_stats);
		if (flow->stats.packets != flow->prev_stats.packets)
			flow->lastused = jiffies;
		spin_unlock(&flow->stats_lock);
	}

	return 0;
}

static int
bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
			      struct bnxt_tc_stats_batch stats_batch[],
			      int *num_flows)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct rhashtable_iter *iter = &tc_info->iter;
	void *flow_node;
	int rc, i;

	rhashtable_walk_start(iter);

	rc = 0;
	for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
		flow_node = rhashtable_walk_next(iter);
		if (IS_ERR(flow_node)) {
			i = 0;
			if (PTR_ERR(flow_node) == -EAGAIN) {
				continue;
			} else {
				rc = PTR_ERR(flow_node);
				goto done;
			}
		}

		/* No more flows */
		if (!flow_node)
			goto done;

		stats_batch[i].flow_node = flow_node;
	}
done:
	rhashtable_walk_stop(iter);
	*num_flows = i;
	return rc;
}

void bnxt_tc_flow_stats_work(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int num_flows, rc;

	num_flows = atomic_read(&tc_info->flow_table.nelems);
	if (!num_flows)
		return;

	rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);

	for (;;) {
		rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
						   &num_flows);
		if (rc) {
			if (rc == -EAGAIN)
				continue;
			break;
		}

		if (!num_flows)
			break;

		bnxt_tc_flow_stats_batch_update(bp, num_flows,
						tc_info->stats_batch);
	}

	rhashtable_walk_exit(&tc_info->iter);
}

int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
			 struct tc_cls_flower_offload *cls_flower)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return bnxt_tc_add_flow(bp, src_fid, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return bnxt_tc_del_flow(bp, cls_flower);
	case TC_CLSFLOWER_STATS:
		return bnxt_tc_get_flow_stats(bp, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct rhashtable_params bnxt_tc_flow_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_flow_node, node),
	.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
	.key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
	.key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
	.key_len = sizeof(struct ip_tunnel_key),
	.automatic_shrinking = true
};

/* convert counter width in bits to a mask */
#define mask(width)		((u64)~0 >> (64 - (width)))
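/* e.g. mask(28) == 0x0fffffff and mask(36) == 0xfffffffff */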

int bnxt_init_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info;
	int rc;

	if (bp->hwrm_spec_code < 0x10803) {
		netdev_warn(bp->dev,
			    "Firmware does not support TC flower offload.\n");
		return -ENOTSUPP;
	}

	tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
	if (!tc_info)
		return -ENOMEM;
	mutex_init(&tc_info->lock);

	/* Counter widths are programmed by FW */
	tc_info->bytes_mask = mask(36);
	tc_info->packets_mask = mask(28);

	tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
	rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
	if (rc)
		goto free_tc_info;

	tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
	rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
	if (rc)
		goto destroy_flow_table;

	tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
	rc = rhashtable_init(&tc_info->decap_l2_table,
			     &tc_info->decap_l2_ht_params);
	if (rc)
		goto destroy_l2_table;

	tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->decap_table,
			     &tc_info->decap_ht_params);
	if (rc)
		goto destroy_decap_l2_table;

	tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->encap_table,
			     &tc_info->encap_ht_params);
	if (rc)
		goto destroy_decap_table;

	tc_info->enabled = true;
	bp->dev->hw_features |= NETIF_F_HW_TC;
	bp->dev->features |= NETIF_F_HW_TC;
	bp->tc_info = tc_info;
	return 0;

destroy_decap_table:
	rhashtable_destroy(&tc_info->decap_table);
destroy_decap_l2_table:
	rhashtable_destroy(&tc_info->decap_l2_table);
destroy_l2_table:
	rhashtable_destroy(&tc_info->l2_table);
destroy_flow_table:
	rhashtable_destroy(&tc_info->flow_table);
free_tc_info:
	kfree(tc_info);
	return rc;
}

void bnxt_shutdown_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;

	if (!bnxt_tc_flower_enabled(bp))
		return;

	rhashtable_destroy(&tc_info->flow_table);
	rhashtable_destroy(&tc_info->l2_table);
	rhashtable_destroy(&tc_info->decap_l2_table);
	rhashtable_destroy(&tc_info->decap_table);
	rhashtable_destroy(&tc_info->encap_table);
	kfree(tc_info);
	bp->tc_info = NULL;
}