/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#define BNXT_FID_INVALID			0xffff
#define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))
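/* For example, VLAN_TCI(5, 3) = 5 | (3 << 13) = 0x6005: the 12-bit VID sits
 * in bits 0-11 and the 3-bit priority in bits 13-15 of the host-order TCI.
 */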

/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
	struct bnxt *bp;

	/* check if dev belongs to the same switch */
	if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
		netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n",
			    dev->ifindex);
		return BNXT_FID_INVALID;
	}

	/* Is dev a VF-rep? */
	if (bnxt_dev_is_vf_rep(dev))
		return bnxt_vf_rep_get_fid(dev);

	bp = netdev_priv(dev);
	return bp->pf.fw_fid;
}

static int bnxt_tc_parse_redir(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	struct net_device *dev = tcf_mirred_dev(tc_act);

	if (!dev) {
		netdev_info(bp->dev, "no dev in mirred action\n");
		return -EINVAL;
	}

	actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
	actions->dst_dev = dev;
	return 0;
}

static void bnxt_tc_parse_vlan(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
	}
}

static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
				    struct bnxt_tc_actions *actions,
				    const struct tc_action *tc_act)
{
	struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
	struct ip_tunnel_key *tun_key = &tun_info->key;

	if (ip_tunnel_info_af(tun_info) != AF_INET) {
		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported\n");
		return -EOPNOTSUPP;
	}

	actions->tun_encap_key = *tun_key;
	actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
	return 0;
}

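/* Walk the TC action list and translate each recognized action (gact drop,
 * mirred redirect, vlan push/pop, tunnel_key set/release) into
 * BNXT_TC_ACTION_FLAG_* bits; actions the driver does not recognize are
 * simply skipped.
 */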
static int bnxt_tc_parse_actions(struct bnxt *bp,
				 struct bnxt_tc_actions *actions,
				 struct tcf_exts *tc_exts)
{
	const struct tc_action *tc_act;
	LIST_HEAD(tc_actions);
	int rc;

	if (!tcf_exts_has_actions(tc_exts)) {
		netdev_info(bp->dev, "no actions\n");
		return -EINVAL;
	}

	tcf_exts_to_list(tc_exts, &tc_actions);
	list_for_each_entry(tc_act, &tc_actions, list) {
		/* Drop action */
		if (is_tcf_gact_shot(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
			return 0; /* don't bother with other actions */
		}

		/* Redirect action */
		if (is_tcf_mirred_egress_redirect(tc_act)) {
			rc = bnxt_tc_parse_redir(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Push/pop VLAN */
		if (is_tcf_vlan(tc_act)) {
			bnxt_tc_parse_vlan(bp, actions, tc_act);
			continue;
		}

		/* Tunnel encap */
		if (is_tcf_tunnel_set(tc_act)) {
			rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Tunnel decap */
		if (is_tcf_tunnel_release(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
			continue;
		}
	}

	if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
		if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
			/* dst_fid is PF's fid */
			actions->dst_fid = bp->pf.fw_fid;
		} else {
			/* find the FID from dst_dev */
			actions->dst_fid =
				bnxt_flow_get_dst_fid(bp, actions->dst_dev);
			if (actions->dst_fid == BNXT_FID_INVALID)
				return -EINVAL;
		}
	}

	return 0;
}

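/* Fetch the key or mask half of a given dissector key from a TC flower
 * offload command.
 */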
#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)

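/* Illustrative example (not taken from this driver): a rule such as
 *
 *	tc filter add dev <uplink> protocol ip parent ffff: flower \
 *		ip_proto tcp dst_port 80 action drop
 *
 * arrives here as a tc_cls_flower_offload cmd; the dissector keys present
 * in used_keys determine which of the blocks below populate bnxt_tc_flow.
 */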
static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;
	u16 addr_type = 0;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;

		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

		flow->l2_key.inner_vlan_tci =
		   cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
		   cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
				GET_MASK(tc_flow_cmd,
					 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
		flow->tun_key.u.ipv4.dst = key->dst;
		flow->tun_mask.u.ipv4.dst = mask->dst;
		flow->tun_key.u.ipv4.src = key->src;
		flow->tun_mask.u.ipv4.src = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
		struct flow_dissector_key_keyid *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
		flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
		flow->tun_key.tp_dst = key->dst;
		flow->tun_mask.tp_dst = mask->dst;
		flow->tun_key.tp_src = key->src;
		flow->tun_mask.tp_src = mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}

static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
	struct hwrm_cfa_flow_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
	req.flow_handle = flow_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d\n",
			    __func__, flow_handle, rc);
		rc = -EIO;
	}
	return rc;
}

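/* Sum the prefix lengths of the four 32-bit words of an IPv6 mask. This
 * assumes a contiguous (CIDR-style) mask, e.g. a /64 mask yields 64.
 */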
static int ipv6_mask_len(struct in6_addr *mask)
{
	int mask_len = 0, i;

	for (i = 0; i < 4; i++)
		mask_len += inet_mask_len(mask->s6_addr32[i]);

	return mask_len;
}

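/* Mask helpers: is_wildcard() - the mask is all-zeroes (field ignored);
 * is_exactmatch() - the mask is all-ones (field fully specified);
 * bits_set() - any bit of a key is set.
 */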
static bool is_wildcard(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++) {
		if (p[i] != 0)
			return false;
	}
	return true;
}

static bool is_exactmatch(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0xff)
			return false;

	return true;
}

static bool bits_set(void *key, int len)
{
	const u8 *p = key;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0)
			return true;

	return false;
}

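/* Offload a parsed flow to the HW via HWRM_CFA_FLOW_ALLOC. On success the
 * FW-assigned handle is returned through @flow_handle; it is later used to
 * free the flow and to query its stats.
 */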
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    __le16 ref_flow_handle,
				    __le32 tunnel_handle, __le16 *flow_handle)
{
	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_tc_actions *actions = &flow->actions;
	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
	struct hwrm_cfa_flow_alloc_input req = { 0 };
	u16 flow_flags = 0, action_flags = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

	req.src_fid = cpu_to_le16(flow->src_fid);
	req.ref_flow_handle = ref_flow_handle;

	if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
	    actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
		req.tunnel_handle = tunnel_handle;
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
	}

	req.ethertype = flow->l2_key.ether_type;
	req.ip_proto = flow->l4_key.ip_proto;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
	}

	if (flow->l2_key.num_vlans > 0) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
		/* FW expects the inner_vlan_tci value to be set
		 * in outer_vlan_tci when num_vlans is 1 (which is
		 * always the case in TC).
		 */
		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
	}

	/* If all IP and L4 fields are wildcarded then this is an L2 flow */
	if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
	    is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
	} else {
		flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

		if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
			req.ip_dst_mask_len =
				inet_mask_len(l3_mask->ipv4.daddr.s_addr);
			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
			req.ip_src_mask_len =
				inet_mask_len(l3_mask->ipv4.saddr.s_addr);
		} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
			       sizeof(req.ip_dst));
			req.ip_dst_mask_len =
					ipv6_mask_len(&l3_mask->ipv6.daddr);
			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
			       sizeof(req.ip_src));
			req.ip_src_mask_len =
					ipv6_mask_len(&l3_mask->ipv6.saddr);
		}
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
		req.l4_src_port = flow->l4_key.ports.sport;
		req.l4_src_port_mask = flow->l4_mask.ports.sport;
		req.l4_dst_port = flow->l4_key.ports.dport;
		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
	} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
		/* l4 ports serve as type/code when ip_proto is ICMP */
		req.l4_src_port = htons(flow->l4_key.icmp.type);
		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
		req.l4_dst_port = htons(flow->l4_key.icmp.code);
		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
	}
	req.flags = cpu_to_le16(flow_flags);

	if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
	} else {
		if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
			action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
			req.dst_fid = cpu_to_le16(actions->dst_fid);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			/* Rewrite config with tpid = 0 implies vlan pop */
			req.l2_rewrite_vlan_tpid = 0;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
	}
	req.action_flags = cpu_to_le16(action_flags);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*flow_handle = resp->flow_handle;
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
		rc = -ENOSPC;
	else if (rc)
		rc = -EIO;
	return rc;
}

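/* Allocate a VXLAN decap filter in FW. l2_info holds the resolved tunnel
 * L2 header (outer dmac, VLAN) and ref_decap_handle points at an existing
 * decap filter that shares the same L2 key, if any.
 */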
static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
				       struct bnxt_tc_flow *flow,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 ref_decap_handle,
				       __le32 *decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
	struct ip_tunnel_key *tun_key = &flow->tun_key;
	u32 enables = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

	req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
		   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
	req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
		/* tunnel_id is wrongly defined in hsi defn. as __le32 */
		req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
		ether_addr_copy(req.dst_macaddr, l2_info->dmac);
	}
	if (l2_info->num_vlans) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
		req.t_ivlan_vid = l2_info->inner_vlan_tci;
	}

	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
	req.ethertype = htons(ETH_P_IP);

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
		req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
		req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
		req.src_ipaddr[0] = tun_key->u.ipv4.src;
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
		req.dst_port = tun_key->tp_dst;
	}

	/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
	 * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
	 */
	req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
	req.enables = cpu_to_le32(enables);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*decap_filter_handle = resp->decap_filter_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc)
		rc = -EIO;
	return rc;
}

static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
				      __le32 decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
	req.decap_filter_id = decap_filter_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
		rc = -EIO;
	}
	return rc;
}

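/* Build the VXLAN encap header (outer L2, IPv4 and UDP/VXLAN fields) from
 * the resolved l2_info and the tunnel key, and allocate an encap record
 * in FW for it.
 */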
static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
				       struct ip_tunnel_key *encap_key,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 *encap_record_handle)
{
	struct hwrm_cfa_encap_record_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
	struct hwrm_cfa_encap_data_vxlan *encap =
			(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
				(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

	ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
	ether_addr_copy(encap->src_mac_addr, l2_info->smac);
	if (l2_info->num_vlans) {
		encap->num_vlan_tags = l2_info->num_vlans;
		encap->ovlan_tci = l2_info->inner_vlan_tci;
		encap->ovlan_tpid = l2_info->inner_vlan_tpid;
	}

	encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
	encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
	encap_ipv4->ttl = encap_key->ttl;

	encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
	encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
	encap_ipv4->protocol = IPPROTO_UDP;

	encap->dst_port = encap_key->tp_dst;
	encap->vni = tunnel_id_to_key32(encap_key->tun_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*encap_record_handle = resp->encap_record_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc)
		rc = -EIO;
	return rc;
}

static int hwrm_cfa_encap_record_free(struct bnxt *bp,
				      __le32 encap_record_handle)
{
	struct hwrm_cfa_encap_record_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
	req.encap_record_id = encap_record_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
		rc = -EIO;
	}
	return rc;
}

static int bnxt_tc_put_l2_node(struct bnxt *bp,
			       struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the L2 shared flow list */
	list_del(&flow_node->l2_list_node);
	if (--l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
					    tc_info->l2_ht_params);
		if (rc)
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_remove_fast: %d\n",
				   __func__, rc);
		kfree_rcu(l2_node, rcu);
	}
	return 0;
}

static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
		    struct rhashtable_params ht_params,
		    struct bnxt_tc_l2_key *l2_key)
{
	struct bnxt_tc_l2_node *l2_node;
	int rc;

	l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
	if (!l2_node) {
		l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
		if (!l2_node)
			return NULL;

		l2_node->key = *l2_key;
		rc = rhashtable_insert_fast(l2_table, &l2_node->node,
					    ht_params);
		if (rc) {
			kfree_rcu(l2_node, rcu);
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_insert_fast: %d\n",
				   __func__, rc);
			return NULL;
		}
		INIT_LIST_HEAD(&l2_node->common_l2_flows);
	}
	return l2_node;
}

/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
			    struct bnxt_tc_flow_node *flow_node,
			    __le16 *ref_flow_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *l2_node;

	l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
				      tc_info->l2_ht_params,
				      &flow->l2_key);
	if (!l2_node)
		return -1;

	/* If any other flow is using this l2_node, use its flow_handle
	 * as the ref_flow_handle
	 */
	if (l2_node->refcount > 0) {
		ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
						 struct bnxt_tc_flow_node,
						 l2_list_node);
		*ref_flow_handle = ref_flow_node->flow_handle;
	} else {
		*ref_flow_handle = cpu_to_le16(0xffff);
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching l2 key can use the flow_handle of this flow
	 * as their ref_flow_handle
	 */
	flow_node->l2_node = l2_node;
	list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
	l2_node->refcount++;
	return 0;
}

/* After the flow parsing is done, this routine checks whether any aspect
 * of the flow prevents it from being offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
	/* If L4 ports are specified then ip_proto must be TCP or UDP */
	if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
	    (flow->l4_key.ip_proto != IPPROTO_TCP &&
	     flow->l4_key.ip_proto != IPPROTO_UDP)) {
		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n",
			    flow->l4_key.ip_proto);
		return false;
	}

	/* Currently source/dest MAC cannot be partial wildcard */
	if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
	    !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
		return false;
	}
	if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
	    !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
		return false;
	}

	/* Currently VLAN fields cannot be partial wildcard */
	if (bits_set(&flow->l2_key.inner_vlan_tci,
		     sizeof(flow->l2_key.inner_vlan_tci)) &&
	    !is_exactmatch(&flow->l2_mask.inner_vlan_tci,
			   sizeof(flow->l2_mask.inner_vlan_tci))) {
		netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n");
		return false;
	}
	if (bits_set(&flow->l2_key.inner_vlan_tpid,
		     sizeof(flow->l2_key.inner_vlan_tpid)) &&
	    !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
			   sizeof(flow->l2_mask.inner_vlan_tpid))) {
		netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
		return false;
	}

	/* Currently Ethertype must be set */
	if (!is_exactmatch(&flow->l2_mask.ether_type,
			   sizeof(flow->l2_mask.ether_type))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
		return false;
	}

	return true;
}

/* Returns the final refcount of the node on success
 * or a negative error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
				   struct rhashtable *tunnel_table,
				   struct rhashtable_params *ht_params,
				   struct bnxt_tc_tunnel_node *tunnel_node)
{
	int rc;

	if (--tunnel_node->refcount == 0) {
		rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
			rc = -1;
		}
		kfree_rcu(tunnel_node, rcu);
		return rc;
	}
	return tunnel_node->refcount;
}

/* Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
			struct rhashtable_params *ht_params,
			struct ip_tunnel_key *tun_key)
{
	struct bnxt_tc_tunnel_node *tunnel_node;
	int rc;

	tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
	if (!tunnel_node) {
		tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
		if (!tunnel_node) {
			rc = -ENOMEM;
			goto err;
		}

		tunnel_node->key = *tun_key;
		tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
		rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			kfree_rcu(tunnel_node, rcu);
			goto err;
		}
	}
	tunnel_node->refcount++;
	return tunnel_node;
err:
	netdev_info(bp->dev, "error rc=%d\n", rc);
	return NULL;
}

static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
					struct bnxt_tc_flow *flow,
					struct bnxt_tc_l2_key *l2_key,
					struct bnxt_tc_flow_node *flow_node,
					__le32 *ref_decap_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *decap_l2_node;

	decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
					    tc_info->decap_l2_ht_params,
					    l2_key);
	if (!decap_l2_node)
		return -1;

	/* If any other flow is using this decap_l2_node, use its decap_handle
	 * as the ref_decap_handle
	 */
	if (decap_l2_node->refcount > 0) {
		ref_flow_node =
			list_first_entry(&decap_l2_node->common_l2_flows,
					 struct bnxt_tc_flow_node,
					 decap_l2_list_node);
		*ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
	} else {
		*ref_decap_handle = INVALID_TUNNEL_HANDLE;
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching decap l2 key can use the decap_filter_handle of
	 * this flow as their ref_decap_handle
	 */
	flow_node->decap_l2_node = decap_l2_node;
	list_add(&flow_node->decap_l2_list_node,
		 &decap_l2_node->common_l2_flows);
	decap_l2_node->refcount++;
	return 0;
}

static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
				      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the decap L2 sharing flow list */
	list_del(&flow_node->decap_l2_list_node);
	if (--decap_l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
					    &decap_l2_node->node,
					    tc_info->decap_l2_ht_params);
		if (rc)
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
		kfree_rcu(decap_l2_node, rcu);
	}
}

static void bnxt_tc_put_decap_handle(struct bnxt *bp,
				     struct bnxt_tc_flow_node *flow_node)
{
	__le32 decap_handle = flow_node->decap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	if (flow_node->decap_l2_node)
		bnxt_tc_put_decap_l2_node(bp, flow_node);

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				     &tc_info->decap_ht_params,
				     flow_node->decap_node);
	if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_decap_filter_free(bp, decap_handle);
}

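/* Resolve the L2 header needed to reach tun_key->u.ipv4.dst: do an IPv4
 * route lookup scoped to the PF netdev, validate that the route egresses
 * via the PF (possibly through a VLAN upper dev), then use a neighbour
 * lookup to learn the next-hop MAC address.
 */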
static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
				       struct ip_tunnel_key *tun_key,
				       struct bnxt_tc_l2_key *l2_info)
{
#ifdef CONFIG_INET
	struct net_device *real_dst_dev = bp->dev;
	struct flowi4 flow = { {0} };
	struct net_device *dst_dev;
	struct neighbour *nbr;
	struct rtable *rt;
	int rc;

	flow.flowi4_proto = IPPROTO_UDP;
	flow.fl4_dport = tun_key->tp_dst;
	flow.daddr = tun_key->u.ipv4.dst;

	rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
	if (IS_ERR(rt)) {
		netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr);
		return -EOPNOTSUPP;
	}

	/* The route must either point to the real_dst_dev or a dst_dev that
	 * uses the real_dst_dev.
	 */
	dst_dev = rt->dst.dev;
	if (is_vlan_dev(dst_dev)) {
#if IS_ENABLED(CONFIG_VLAN_8021Q)
		struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);

		if (vlan->real_dev != real_dst_dev) {
			netdev_info(bp->dev,
				    "dst_dev(%s) doesn't use PF-if(%s)\n",
				    netdev_name(dst_dev),
				    netdev_name(real_dst_dev));
			rc = -EOPNOTSUPP;
			goto put_rt;
		}
		l2_info->inner_vlan_tci = htons(vlan->vlan_id);
		l2_info->inner_vlan_tpid = vlan->vlan_proto;
		l2_info->num_vlans = 1;
#endif
	} else if (dst_dev != real_dst_dev) {
		netdev_info(bp->dev,
			    "dst_dev(%s) for %pI4b is not PF-if(%s)\n",
			    netdev_name(dst_dev), &flow.daddr,
			    netdev_name(real_dst_dev));
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
	if (!nbr) {
		netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n",
			    &flow.daddr);
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	tun_key->u.ipv4.src = flow.saddr;
	tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
	neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
	ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
	neigh_release(nbr);
	ip_rt_put(rt);

	return 0;
put_rt:
	ip_rt_put(rt);
	return rc;
#else
	return -EOPNOTSUPP;
#endif
}

static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *decap_filter_handle)
{
	struct ip_tunnel_key *decap_key = &flow->tun_key;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_l2_key l2_info = { {0} };
	struct bnxt_tc_tunnel_node *decap_node;
	struct ip_tunnel_key tun_key = { 0 };
	struct bnxt_tc_l2_key *decap_l2_info;
	__le32 ref_decap_handle;
	int rc;

	/* Check if there's another flow using the same tunnel decap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields. Ignore src_port in the tunnel_key,
	 * since it is not required for decap filters.
	 */
	decap_key->tp_src = 0;
	decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
					     &tc_info->decap_ht_params,
					     decap_key);
	if (!decap_node)
		return -ENOMEM;

	flow_node->decap_node = decap_node;

	if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	/* Resolve the L2 fields for tunnel decap:
	 * resolve the route to the remote VTEP (the saddr of the decap key)
	 * and find its next-hop MAC address.
	 */
	tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
	tun_key.tp_dst = flow->tun_key.tp_dst;
	rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
	if (rc)
		goto put_decap;

	decap_l2_info = &decap_node->l2_info;
	/* decap smac is wildcarded */
	ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
	if (l2_info.num_vlans) {
		decap_l2_info->num_vlans = l2_info.num_vlans;
		decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
		decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
	}
	flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;

	/* For getting a decap_filter_handle we first need to check if
	 * there are any other decap flows that share the same tunnel L2
	 * key and if so, pass that flow's decap_filter_handle as the
	 * ref_decap_handle for this flow.
	 */
	rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
					  &ref_decap_handle);
	if (rc)
		goto put_decap;

	/* Issue the hwrm cmd to allocate a decap filter handle */
	rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
					 ref_decap_handle,
					 &decap_node->tunnel_handle);
	if (rc)
		goto put_decap_l2;

done:
	*decap_filter_handle = decap_node->tunnel_handle;
	return 0;

put_decap_l2:
	bnxt_tc_put_decap_l2_node(bp, flow_node);
put_decap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				&tc_info->decap_ht_params,
				flow_node->decap_node);
	return rc;
}

static void bnxt_tc_put_encap_handle(struct bnxt *bp,
				     struct bnxt_tc_tunnel_node *encap_node)
{
	__le32 encap_handle = encap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				     &tc_info->encap_ht_params, encap_node);
	if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_encap_record_free(bp, encap_handle);
}

/* Lookup the tunnel encap table and check if there's an encap_handle
 * alloc'd already.
 * If not, query L2 info via a route lookup and issue an encap_record_alloc
 * cmd to FW.
 */
static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *encap_handle)
{
	struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_tunnel_node *encap_node;
	int rc;

	/* Check if there's another flow using the same tunnel encap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields
	 */
	encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
					     &tc_info->encap_ht_params,
					     encap_key);
	if (!encap_node)
		return -ENOMEM;

	flow_node->encap_node = encap_node;

	if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
	if (rc)
		goto put_encap;

	/* Allocate a new tunnel encap record */
	rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
					 &encap_node->tunnel_handle);
	if (rc)
		goto put_encap;

done:
	*encap_handle = encap_node->tunnel_handle;
	return 0;

put_encap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				&tc_info->encap_ht_params, encap_node);
	return rc;
}

static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
				      struct bnxt_tc_flow *flow,
				      struct bnxt_tc_flow_node *flow_node)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		bnxt_tc_put_decap_handle(bp, flow_node);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}

static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
				     struct bnxt_tc_flow *flow,
				     struct bnxt_tc_flow_node *flow_node,
				     __le32 *tunnel_handle)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		return bnxt_tc_get_decap_handle(bp, flow, flow_node,
						tunnel_handle);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		return bnxt_tc_get_encap_handle(bp, flow, flow_node,
						tunnel_handle);
	else
		return 0;
}

static int __bnxt_tc_del_flow(struct bnxt *bp,
			      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* send HWRM cmd to free the flow-id */
	bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);

	mutex_lock(&tc_info->lock);

	/* release references to any tunnel encap/decap nodes */
	bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);

	/* release reference to l2 node */
	bnxt_tc_put_l2_node(bp, flow_node);

	mutex_unlock(&tc_info->lock);

	rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n",
			   __func__, rc);

	kfree_rcu(flow_node, rcu);
	return 0;
}

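/* For flows that decapsulate a tunnel, the packet ingresses from the
 * uplink (PF), so the flow must be offloaded with the PF's fw_fid as
 * src_fid rather than the fid the rule was installed on.
 */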
static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
				u16 src_fid)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		flow->src_fid = bp->pf.fw_fid;
	else
		flow->src_fid = src_fid;
}

/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_node *new_node, *old_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow *flow;
	__le32 tunnel_handle = 0;
	__le16 ref_flow_handle;
	int rc;

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node) {
		rc = -ENOMEM;
		goto done;
	}
	new_node->cookie = tc_flow_cmd->cookie;
	flow = &new_node->flow;

	rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
	if (rc)
		goto free_node;

	bnxt_tc_set_src_fid(bp, flow, src_fid);

	if (!bnxt_tc_can_offload(bp, flow)) {
		rc = -ENOSPC;
		goto free_node;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		__bnxt_tc_del_flow(bp, old_node);

	/* Check if the L2 part of the flow has been offloaded already.
	 * If so, bump up its refcnt and get its reference handle.
	 */
	mutex_lock(&tc_info->lock);
	rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
	if (rc)
		goto unlock;

	/* If the flow involves tunnel encap/decap, get tunnel_handle */
	rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
	if (rc)
		goto put_l2;

	/* send HWRM cmd to alloc the flow */
	rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
				      tunnel_handle, &new_node->flow_handle);
	if (rc)
		goto put_tunnel;

	flow->lastused = jiffies;
	spin_lock_init(&flow->stats_lock);
	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		goto hwrm_flow_free;

	mutex_unlock(&tc_info->lock);
	return 0;

hwrm_flow_free:
	bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
put_tunnel:
	bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2:
	bnxt_tc_put_l2_node(bp, new_node);
unlock:
	mutex_unlock(&tc_info->lock);
free_node:
	kfree_rcu(new_node, rcu);
done:
	netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n",
		   __func__, tc_flow_cmd->cookie, rc);
	return rc;
}

static int bnxt_tc_del_flow(struct bnxt *bp,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node)
		return -EINVAL;

	return __bnxt_tc_del_flow(bp, flow_node);
}

static int bnxt_tc_get_flow_stats(struct bnxt *bp,
				  struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;
	struct bnxt_tc_flow *flow;
	unsigned long lastused;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node)
		return -1;

	flow = &flow_node->flow;
	curr_stats = &flow->stats;
	prev_stats = &flow->prev_stats;

	spin_lock(&flow->stats_lock);
	stats.packets = curr_stats->packets - prev_stats->packets;
	stats.bytes = curr_stats->bytes - prev_stats->bytes;
	*prev_stats = *curr_stats;
	lastused = flow->lastused;
	spin_unlock(&flow->stats_lock);

	tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
			      lastused);
	return 0;
}

static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
			     struct bnxt_tc_stats_batch stats_batch[])
{
	struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_flow_stats_input req = { 0 };
	__le16 *req_flow_handles = &req.flow_handle_0;
	int rc, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
	req.num_flows = cpu_to_le16(num_flows);
	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;

		req_flow_handles[i] = flow_node->flow_handle;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		__le64 *resp_packets = &resp->packet_0;
		__le64 *resp_bytes = &resp->byte_0;

		for (i = 0; i < num_flows; i++) {
			stats_batch[i].hw_stats.packets =
						le64_to_cpu(resp_packets[i]);
			stats_batch[i].hw_stats.bytes =
						le64_to_cpu(resp_bytes[i]);
		}
	} else {
		netdev_info(bp->dev, "error rc=%d\n", rc);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc)
		rc = -EIO;
	return rc;
}

/* Add val to accum while handling a possible wraparound
 * of val. Even though val is of type u64, its actual width
 * is denoted by mask and it will wrap around beyond that width.
 */
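/* Worked example with a 4-bit counter (mask = 0xf): if *accum is 0x12 and
 * the HW now reports val = 0x1, then val < low_bits(*accum) flags a wrap
 * and the result is high_bits(0x12) + 0x1 + (0xf + 1) = 0x21.
 */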
static void accumulate_val(u64 *accum, u64 val, u64 mask)
{
#define low_bits(x, mask)		((x) & (mask))
#define high_bits(x, mask)		((x) & ~(mask))
	bool wrapped = val < low_bits(*accum, mask);

	*accum = high_bits(*accum, mask) + val;
	if (wrapped)
		*accum += (mask + 1);
}

/* The HW counters' width is much less than 64 bits.
 * Handle possible wrap-arounds while updating the stat counters.
 */
static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
				  struct bnxt_tc_flow_stats *acc_stats,
				  struct bnxt_tc_flow_stats *hw_stats)
{
	accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
	accumulate_val(&acc_stats->packets, hw_stats->packets,
		       tc_info->packets_mask);
}

static int
bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
				struct bnxt_tc_stats_batch stats_batch[])
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc, i;

	rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
	if (rc)
		return rc;

	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
		struct bnxt_tc_flow *flow = &flow_node->flow;

		spin_lock(&flow->stats_lock);
		bnxt_flow_stats_accum(tc_info, &flow->stats,
				      &stats_batch[i].hw_stats);
		if (flow->stats.packets != flow->prev_stats.packets)
			flow->lastused = jiffies;
		spin_unlock(&flow->stats_lock);
	}

	return 0;
}

static int
bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
			      struct bnxt_tc_stats_batch stats_batch[],
			      int *num_flows)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct rhashtable_iter *iter = &tc_info->iter;
	void *flow_node;
	int rc, i;

	rhashtable_walk_start(iter);

	rc = 0;
	for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
		flow_node = rhashtable_walk_next(iter);
		if (IS_ERR(flow_node)) {
			i = 0;
			if (PTR_ERR(flow_node) == -EAGAIN) {
				continue;
			} else {
				rc = PTR_ERR(flow_node);
				goto done;
			}
		}

		/* No more flows */
		if (!flow_node)
			goto done;

		stats_batch[i].flow_node = flow_node;
	}
done:
	rhashtable_walk_stop(iter);
	*num_flows = i;
	return rc;
}

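/* Periodic stats poll: walk the flow table in batches of up to
 * BNXT_FLOW_STATS_BATCH_MAX flows, query the HW counters for each batch
 * via HWRM_CFA_FLOW_STATS and fold the results into the per-flow
 * accumulators.
 */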
void bnxt_tc_flow_stats_work(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int num_flows, rc;

	num_flows = atomic_read(&tc_info->flow_table.nelems);
	if (!num_flows)
		return;

	rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);

	for (;;) {
		rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
						   &num_flows);
		if (rc) {
			if (rc == -EAGAIN)
				continue;
			break;
		}

		if (!num_flows)
			break;

		bnxt_tc_flow_stats_batch_update(bp, num_flows,
						tc_info->stats_batch);
	}

	rhashtable_walk_exit(&tc_info->iter);
}

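/* Entry point for TC flower offload requests made on the PF or a VF-rep
 * netdev; src_fid identifies the function on which the rule was installed.
 */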
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
			 struct tc_cls_flower_offload *cls_flower)
{
	int rc = 0;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
		break;

	case TC_CLSFLOWER_DESTROY:
		rc = bnxt_tc_del_flow(bp, cls_flower);
		break;

	case TC_CLSFLOWER_STATS:
		rc = bnxt_tc_get_flow_stats(bp, cls_flower);
		break;
	}
	return rc;
}

static const struct rhashtable_params bnxt_tc_flow_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_flow_node, node),
	.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
	.key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
	.key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
	.key_len = sizeof(struct ip_tunnel_key),
	.automatic_shrinking = true
};

/* convert counter width in bits to a mask */
#define mask(width)		((u64)~0 >> (64 - (width)))
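/* e.g. mask(36) = 0xfffffffff, matching the 36-bit byte counter below */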

int bnxt_init_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info;
	int rc;

	if (bp->hwrm_spec_code < 0x10803) {
		netdev_warn(bp->dev,
			    "Firmware does not support TC flower offload.\n");
		return -ENOTSUPP;
	}

	tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
	if (!tc_info)
		return -ENOMEM;
	mutex_init(&tc_info->lock);

	/* Counter widths are programmed by FW */
	tc_info->bytes_mask = mask(36);
	tc_info->packets_mask = mask(28);

	tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
	rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
	if (rc)
		goto free_tc_info;

	tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
	rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
	if (rc)
		goto destroy_flow_table;

	tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
	rc = rhashtable_init(&tc_info->decap_l2_table,
			     &tc_info->decap_l2_ht_params);
	if (rc)
		goto destroy_l2_table;

	tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->decap_table,
			     &tc_info->decap_ht_params);
	if (rc)
		goto destroy_decap_l2_table;

	tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->encap_table,
			     &tc_info->encap_ht_params);
	if (rc)
		goto destroy_decap_table;

	tc_info->enabled = true;
	bp->dev->hw_features |= NETIF_F_HW_TC;
	bp->dev->features |= NETIF_F_HW_TC;
	bp->tc_info = tc_info;
	return 0;

destroy_decap_table:
	rhashtable_destroy(&tc_info->decap_table);
destroy_decap_l2_table:
	rhashtable_destroy(&tc_info->decap_l2_table);
destroy_l2_table:
	rhashtable_destroy(&tc_info->l2_table);
destroy_flow_table:
	rhashtable_destroy(&tc_info->flow_table);
free_tc_info:
	kfree(tc_info);
	return rc;
}

void bnxt_shutdown_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;

	if (!bnxt_tc_flower_enabled(bp))
		return;

	rhashtable_destroy(&tc_info->flow_table);
	rhashtable_destroy(&tc_info->l2_table);
	rhashtable_destroy(&tc_info->decap_l2_table);
	rhashtable_destroy(&tc_info->decap_table);
	rhashtable_destroy(&tc_info->encap_table);
	kfree(tc_info);
	bp->tc_info = NULL;
}