10d9f9647SVlad Buslov // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
20d9f9647SVlad Buslov /* Copyright (c) 2021 Mellanox Technologies. */
30d9f9647SVlad Buslov 
48914add2SVlad Buslov #include <net/fib_notifier.h>
55632817bSVlad Buslov #include <net/nexthop.h>
60d9f9647SVlad Buslov #include "tc_tun_encap.h"
70d9f9647SVlad Buslov #include "en_tc.h"
80d9f9647SVlad Buslov #include "tc_tun.h"
90d9f9647SVlad Buslov #include "rep/tc.h"
100d9f9647SVlad Buslov #include "diag/en_tc_tracepoint.h"
110d9f9647SVlad Buslov 
/* Flag values for mlx5e_route_entry::flags. */
enum {
	MLX5E_ROUTE_ENTRY_VALID     = BIT(0),
};
158914add2SVlad Buslov 
/* If the encap route device is an OVS internal port device (ovs master),
 * rewrite the flow's forward actions to egress through the internal port.
 * Returns 0 when the route device is absent or not an OVS master (nothing
 * to do), otherwise the result of mlx5e_set_fwd_to_int_port_actions().
 */
static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
				     struct mlx5_flow_attr *attr,
				     struct mlx5e_encap_entry *e,
				     int out_index)
{
	struct net_device *route_dev;
	int err = 0;

	/* Takes a reference on the device; released via dev_put() below. */
	route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);

	if (!route_dev || !netif_is_ovs_master(route_dev))
		goto out;

	err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
						MLX5E_TC_INT_PORT_EGRESS,
						&attr->action, out_index);

out:
	if (route_dev)
		dev_put(route_dev);

	return err;
}
39100ad4e2SAriel Levkovich 
/* Hash key identifying a routed tunnel endpoint (IPv4 or IPv6). */
struct mlx5e_route_key {
	int ip_version;	/* 4 or 6; selects which union member is in use */
	union {
		__be32 v4;
		struct in6_addr v6;
	} endpoint_ip;
};
47777bb800SVlad Buslov 
/* Tracks the encap entries and decap flows that depend on one route, so
 * they can be revalidated when the route changes.
 */
struct mlx5e_route_entry {
	struct mlx5e_route_key key;
	struct list_head encap_entries;	/* encap entries using this route */
	struct list_head decap_flows;	/* decap flows using this route */
	u32 flags;			/* MLX5E_ROUTE_ENTRY_* bits */
	struct hlist_node hlist;	/* membership in route hash table */
	refcount_t refcnt;
	int tunnel_dev_index;		/* ifindex of the tunnel device */
	struct rcu_head rcu;		/* deferred free under RCU */
};
58777bb800SVlad Buslov 
/* Per-priv tunnel encap context; holds the route table and FIB notifier. */
struct mlx5e_tc_tun_encap {
	struct mlx5e_priv *priv;
	struct notifier_block fib_nb;	/* FIB event notifier callback */
	spinlock_t route_lock; /* protects route_tbl */
	unsigned long route_tbl_last_update;	/* timestamp of last table change */
	DECLARE_HASHTABLE(route_tbl, 8);	/* mlx5e_route_entry entries */
};
668914add2SVlad Buslov 
678914add2SVlad Buslov static bool mlx5e_route_entry_valid(struct mlx5e_route_entry *r)
688914add2SVlad Buslov {
698914add2SVlad Buslov 	return r->flags & MLX5E_ROUTE_ENTRY_VALID;
708914add2SVlad Buslov }
718914add2SVlad Buslov 
/* Allocate rx_tun_attr for @flow and fill it with the tunnel source and
 * destination addresses taken from the flow's match spec.  The TUN_RX flag
 * is set only when both addresses are non-zero, since both are required to
 * establish routing.  Returns 0 on success, -ENOMEM on allocation failure.
 */
int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec)
{
	struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
	struct mlx5_rx_tun_attr *tun_attr;
	void *daddr, *saddr;
	u8 ip_version;

	tun_attr = kvzalloc(sizeof(*tun_attr), GFP_KERNEL);
	if (!tun_attr)
		return -ENOMEM;

	/* Attach to the flow attr up front so tun_attr is released with it
	 * even when we return early below.
	 */
	esw_attr->rx_tun_attr = tun_attr;
	ip_version = mlx5e_tc_get_ip_version(spec, true);

	if (ip_version == 4) {
		daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				     outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		saddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				     outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		tun_attr->dst_ip.v4 = *(__be32 *)daddr;
		tun_attr->src_ip.v4 = *(__be32 *)saddr;
		/* Missing address: keep tun_attr but skip setting TUN_RX. */
		if (!tun_attr->dst_ip.v4 || !tun_attr->src_ip.v4)
			return 0;
	}
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	else if (ip_version == 6) {
		int ipv6_size = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
		struct in6_addr zerov6 = {};

		daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				     outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
		saddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				     outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6);
		memcpy(&tun_attr->dst_ip.v6, daddr, ipv6_size);
		memcpy(&tun_attr->src_ip.v6, saddr, ipv6_size);
		/* All-zero address: keep tun_attr but skip setting TUN_RX. */
		if (!memcmp(&tun_attr->dst_ip.v6, &zerov6, sizeof(zerov6)) ||
		    !memcmp(&tun_attr->src_ip.v6, &zerov6, sizeof(zerov6)))
			return 0;
	}
#endif
	/* Only set the flag if both src and dst ip addresses exist. They are
	 * required to establish routing.
	 */
	flow_flag_set(flow, TUN_RX);
	flow->attr->tun_ip_version = ip_version;
	return 0;
}
1200d9f9647SVlad Buslov 
1218914add2SVlad Buslov static bool mlx5e_tc_flow_all_encaps_valid(struct mlx5_esw_flow_attr *esw_attr)
1228914add2SVlad Buslov {
1238914add2SVlad Buslov 	bool all_flow_encaps_valid = true;
1248914add2SVlad Buslov 	int i;
1258914add2SVlad Buslov 
1268914add2SVlad Buslov 	/* Flow can be associated with multiple encap entries.
1278914add2SVlad Buslov 	 * Before offloading the flow verify that all of them have
1288914add2SVlad Buslov 	 * a valid neighbour.
1298914add2SVlad Buslov 	 */
1308914add2SVlad Buslov 	for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
1318914add2SVlad Buslov 		if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
1328914add2SVlad Buslov 			continue;
1338914add2SVlad Buslov 		if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
1348914add2SVlad Buslov 			all_flow_encaps_valid = false;
1358914add2SVlad Buslov 			break;
1368914add2SVlad Buslov 		}
1378914add2SVlad Buslov 	}
1388914add2SVlad Buslov 
1398914add2SVlad Buslov 	return all_flow_encaps_valid;
1408914add2SVlad Buslov }
1418914add2SVlad Buslov 
/* Transition flows in @flow_list from the slow path rule to the offloaded
 * encap rule after encap entry @e's cached header became usable: register
 * the packet reformat with the device, mark the encap valid and re-offload
 * each eligible flow.  Failures on individual flows are logged and skipped.
 */
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_pkt_reformat_params reformat_params;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_attr *attr;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	/* Without routing info the encap cannot be offloaded at all. */
	if (e->flags & MLX5_ENCAP_ENTRY_NO_ROUTE)
		return;

	/* Register the cached encap header with the device. */
	memset(&reformat_params, 0, sizeof(reformat_params));
	reformat_params.type = e->reformat_type;
	reformat_params.size = e->encap_size;
	reformat_params.data = e->encap_header;
	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
						     &reformat_params,
						     MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(e->pkt_reformat)) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
			       PTR_ERR(e->pkt_reformat));
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, flow_list, tmp_list) {
		/* Only flows currently offloaded via the slow path need to
		 * be moved to the encap rule.
		 */
		if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
			continue;

		spec = &flow->attr->parse_attr->spec;

		attr = mlx5e_tc_get_encap_attr(flow);
		esw_attr = attr->esw_attr;
		esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
		esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;

		/* Do not offload flows with unresolved neighbors */
		if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
			continue;

		err = mlx5e_tc_offload_flow_post_acts(flow);
		if (err) {
			mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
				       err);
			continue;
		}

		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
		if (IS_ERR(rule)) {
			/* Roll back the post acts offload done above. */
			mlx5e_tc_unoffload_flow_post_acts(flow);
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow);
		flow->rule[0] = rule;
		/* was unset when slow path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}
}
2110d9f9647SVlad Buslov 
/* Move flows in @flow_list back from the offloaded encap rule to the slow
 * path rule (e.g. because encap entry @e lost its neighbour), then clear
 * the entry's valid flag and release its packet reformat context.
 */
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_attr *attr;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry(flow, flow_list, tmp_list) {
		/* Skip flows not offloaded or already on the slow path. */
		if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
			continue;
		spec = &flow->attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);

		attr = mlx5e_tc_get_encap_attr(flow);
		esw_attr = attr->esw_attr;
		/* mark the flow's encap dest as non-valid */
		esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
		mlx5e_tc_unoffload_flow_post_acts(flow);
		flow->rule[0] = rule;
		/* was unset when fast path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
2550d9f9647SVlad Buslov 
/* Take a temporary reference to @flow and add it to @flow_list through the
 * flow's tmp_list node, recording @index in tmp_entry_index.  If the flow
 * is being deleted concurrently, wait for its hardware state to be removed
 * instead of taking it.
 */
static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
				struct list_head *flow_list,
				int index)
{
	if (IS_ERR(mlx5e_flow_get(flow))) {
		/* Flow is being deleted concurrently. Wait for it to be
		 * unoffloaded from hardware, otherwise deleting encap will
		 * fail.
		 */
		wait_for_completion(&flow->del_hw_done);
		return;
	}
	/* Ensure flow initialization finished before exposing it on the list. */
	wait_for_completion(&flow->init_done);

	flow->tmp_entry_index = index;
	list_add(&flow->tmp_list, flow_list);
}
2738914add2SVlad Buslov 
/* Takes reference to all flows attached to encap and adds the flows to
 * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
 */
void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
{
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(efi, &e->flows, list) {
		/* Each encap_flow_item is embedded in its flow's encaps[] array. */
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		mlx5e_take_tmp_flow(flow, flow_list, efi->index);
	}
}
2870d9f9647SVlad Buslov 
/* Takes reference to all flows attached to route and adds the flows to
 * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
 */
static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
					     struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow;

	/* Decap flows have no encap index; use 0 as the placeholder index. */
	list_for_each_entry(flow, &r->decap_flows, decap_routes)
		mlx5e_take_tmp_flow(flow, flow_list, 0);
}
2998914add2SVlad Buslov 
/* Predicate deciding whether an encap entry terminates the search below. */
typedef bool (match_cb)(struct mlx5e_encap_entry *);

/* Walk nhe->encap_list under RCU, starting after @e (or from the head when
 * @e is NULL), and return the first referencable entry for which @match
 * returns true.  The reference on the starting entry @e is always released.
 * Returns NULL when the list is exhausted.
 */
static struct mlx5e_encap_entry *
mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
			      struct mlx5e_encap_entry *e,
			      match_cb match)
{
	struct mlx5e_encap_entry *next = NULL;

retry:
	rcu_read_lock();

	/* find encap with non-zero reference counter value */
	for (next = e ?
		     list_next_or_null_rcu(&nhe->encap_list,
					   &e->encap_list,
					   struct mlx5e_encap_entry,
					   encap_list) :
		     list_first_or_null_rcu(&nhe->encap_list,
					    struct mlx5e_encap_entry,
					    encap_list);
	     next;
	     next = list_next_or_null_rcu(&nhe->encap_list,
					  &next->encap_list,
					  struct mlx5e_encap_entry,
					  encap_list))
		if (mlx5e_encap_take(next))
			break;

	rcu_read_unlock();

	/* release starting encap */
	if (e)
		mlx5e_encap_put(netdev_priv(e->out_dev), e);
	if (!next)
		return next;

	/* wait for encap to be fully initialized */
	wait_for_completion(&next->res_ready);
	/* continue searching if encap entry is not in valid state after completion */
	if (!match(next)) {
		e = next;
		goto retry;
	}

	return next;
}
3470d9f9647SVlad Buslov 
348fb1a3132SVlad Buslov static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
349fb1a3132SVlad Buslov {
350fb1a3132SVlad Buslov 	return e->flags & MLX5_ENCAP_ENTRY_VALID;
351fb1a3132SVlad Buslov }
352fb1a3132SVlad Buslov 
/* Return the next encap entry of @nhe after @e that is in valid state,
 * releasing the reference on @e; NULL when none remain.
 */
static struct mlx5e_encap_entry *
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
			   struct mlx5e_encap_entry *e)
{
	return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
}
359fb1a3132SVlad Buslov 
/* True when encap entry initialization completed without error
 * (a negative compl_result indicates failed initialization).
 */
static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
{
	return e->compl_result >= 0;
}
364fb1a3132SVlad Buslov 
/* Return the next successfully initialized encap entry of @nhe after @e,
 * releasing the reference on @e; NULL when none remain.
 */
struct mlx5e_encap_entry *
mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
			  struct mlx5e_encap_entry *e)
{
	return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
}
371fb1a3132SVlad Buslov 
/* Check whether any offloaded flow using @nhe's encap entries has seen
 * traffic since the last report (via flow counter lastuse).  If so, record
 * the time and poke the corresponding neighbour entry so the kernel keeps
 * it alive.
 */
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	struct mlx5e_encap_entry *e = NULL;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;
	u64 lastuse;

	/* Pick the neighbour table matching the address family. */
	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	/* mlx5e_get_next_valid_encap() releases previous encap before returning
	 * next one.
	 */
	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
		struct encap_flow_item *efi, *tmp;
		struct mlx5_eswitch *esw;
		LIST_HEAD(flow_list);

		esw = priv->mdev->priv.eswitch;
		mutex_lock(&esw->offloads.encap_tbl_lock);
		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			/* Skip flows being deleted concurrently. */
			if (IS_ERR(mlx5e_flow_get(flow)))
				continue;
			list_add(&flow->tmp_list, &flow_list);

			if (mlx5e_is_offloaded_flow(flow)) {
				counter = mlx5e_tc_get_counter(flow);
				lastuse = mlx5_fc_query_lastuse(counter);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		mutex_unlock(&esw->offloads.encap_tbl_lock);

		/* Release the temporary flow references taken above. */
		mlx5e_put_flow_list(priv, &flow_list);
		if (neigh_used) {
			/* release current encap before breaking the loop */
			mlx5e_encap_put(priv, e);
			break;
		}
	}

	trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, READ_ONCE(nhe->neigh_dev));
		if (!n)
			return;

		/* Keep the neighbour entry from expiring. */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}
4450d9f9647SVlad Buslov 
/* Free encap entry @e after its last reference dropped.  Detaches it from
 * the rep and releases its packet reformat when it was fully attached
 * (compl_result > 0).  Must only be called when no flows reference it.
 */
static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	WARN_ON(!list_empty(&e->flows));

	if (e->compl_result > 0) {
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
	}

	kfree(e->tun_info);
	kfree(e->encap_header);
	/* Readers may still hold RCU references; defer the final free. */
	kfree_rcu(e, rcu);
}
4610d9f9647SVlad Buslov 
/* Free decap entry @d after its last reference dropped, releasing its
 * packet reformat when initialization completed successfully
 * (compl_result == 0).  Must only be called when no flows reference it.
 */
static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
				struct mlx5e_decap_entry *d)
{
	WARN_ON(!list_empty(&d->flows));

	if (!d->compl_result)
		mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);

	/* Readers may still hold RCU references; defer the final free. */
	kfree_rcu(d, rcu);
}
4720d9f9647SVlad Buslov 
/* Drop one reference to encap entry @e.  On the last reference, unlink it
 * from its route list and the encap hash table under encap_tbl_lock, then
 * free it.
 */
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* Only takes the lock when the refcount actually hits zero. */
	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
		return;
	list_del(&e->route_list);
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}
4850d9f9647SVlad Buslov 
/* Drop one reference to decap entry @d.  On the last reference, unlink it
 * from the decap hash table under decap_tbl_lock, then free it.
 */
static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* Only takes the lock when the refcount actually hits zero. */
	if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
		return;
	hash_del_rcu(&d->hlist);
	mutex_unlock(&esw->offloads.decap_tbl_lock);

	mlx5e_decap_dealloc(priv, d);
}
4970d9f9647SVlad Buslov 
static void mlx5e_detach_encap_route(struct mlx5e_priv *priv,
				     struct mlx5e_tc_flow *flow,
				     int out_index);

/* Detach the encap entry at @out_index from @flow: remove the flow from
 * the entry's flow list and, when the last reference drops, unlink and
 * free the entry.  Also detaches the encap's route tracking when the
 * destination required source-port-change chaining.
 */
void mlx5e_detach_encap(struct mlx5e_priv *priv,
			struct mlx5e_tc_flow *flow,
			struct mlx5_flow_attr *attr,
			int out_index)
{
	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* Encap entries only exist for eswitch (FDB) flows. */
	if (!mlx5e_is_eswitch_flow(flow))
		return;

	if (attr->esw_attr->dests[out_index].flags &
	    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
		mlx5e_detach_encap_route(priv, flow, out_index);

	/* flow wasn't fully initialized */
	if (!e)
		return;

	mutex_lock(&esw->offloads.encap_tbl_lock);
	list_del(&flow->encaps[out_index].list);
	flow->encaps[out_index].e = NULL;
	if (!refcount_dec_and_test(&e->refcnt)) {
		mutex_unlock(&esw->offloads.encap_tbl_lock);
		return;
	}
	/* Last reference: unlink under the lock, free outside of it. */
	list_del(&e->route_list);
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}
5340d9f9647SVlad Buslov 
/* Detach @flow's decap (L3-to-L2) reformat entry: remove the flow from the
 * entry's list and, when the last reference drops, unlink and free it.
 */
void mlx5e_detach_decap(struct mlx5e_priv *priv,
			struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_decap_entry *d = flow->decap_reformat;

	/* Flow has no decap reformat attached. */
	if (!d)
		return;

	mutex_lock(&esw->offloads.decap_tbl_lock);
	list_del(&flow->l3_to_l2_reformat);
	flow->decap_reformat = NULL;

	if (!refcount_dec_and_test(&d->refcnt)) {
		mutex_unlock(&esw->offloads.decap_tbl_lock);
		return;
	}
	/* Last reference: unlink under the lock, free outside of it. */
	hash_del_rcu(&d->hlist);
	mutex_unlock(&esw->offloads.decap_tbl_lock);

	mlx5e_decap_dealloc(priv, d);
}
5570d9f9647SVlad Buslov 
558929a2fadSDima Chumak bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
559929a2fadSDima Chumak 					   struct mlx5e_encap_key *b)
5600d9f9647SVlad Buslov {
561929a2fadSDima Chumak 	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) == 0 &&
562929a2fadSDima Chumak 		a->tc_tunnel->tunnel_type == b->tc_tunnel->tunnel_type;
5630d9f9647SVlad Buslov }
5640d9f9647SVlad Buslov 
/* memcmp-style comparison of two decap keys; returns 0 when equal. */
static int cmp_decap_info(struct mlx5e_decap_key *a,
			  struct mlx5e_decap_key *b)
{
	return memcmp(&a->key, &b->key, sizeof(b->key));
}
5700d9f9647SVlad Buslov 
/* Hash an encap key; the tunnel type seeds the hash so different tunnel
 * types with identical ip_tunnel_key contents hash differently.
 */
static int hash_encap_info(struct mlx5e_encap_key *key)
{
	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
		     key->tc_tunnel->tunnel_type);
}
5760d9f9647SVlad Buslov 
/* Hash a decap key for the decap hash table lookup. */
static int hash_decap_info(struct mlx5e_decap_key *key)
{
	return jhash(&key->key, sizeof(key->key), 0);
}
5810d9f9647SVlad Buslov 
/* Try to take a reference to @e; fails (returns false) when the entry is
 * already being destroyed (refcount reached zero).
 */
bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
{
	return refcount_inc_not_zero(&e->refcnt);
}
5860d9f9647SVlad Buslov 
/* Try to take a reference to @e; fails (returns false) when the entry is
 * already being destroyed (refcount reached zero).
 */
static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
{
	return refcount_inc_not_zero(&e->refcnt);
}
5910d9f9647SVlad Buslov 
/* Look up an encap entry matching @key in the eswitch encap hash table at
 * bucket @hash_key, taking a reference to it.  Uses the tunnel-specific
 * encap_info_equal() callback for comparison.  Returns NULL when no live
 * matching entry exists.
 */
static struct mlx5e_encap_entry *
mlx5e_encap_get(struct mlx5e_priv *priv, struct mlx5e_encap_key *key,
		uintptr_t hash_key)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_encap_key e_key;
	struct mlx5e_encap_entry *e;

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		e_key.ip_tun_key = &e->tun_info->key;
		e_key.tc_tunnel = e->tunnel;
		/* Entries with zero refcount are dying; skip them. */
		if (e->tunnel->encap_info_equal(&e_key, key) &&
		    mlx5e_encap_take(e))
			return e;
	}

	return NULL;
}
6110d9f9647SVlad Buslov 
/* Look up a decap entry matching @key in the eswitch decap hash table at
 * bucket @hash_key, taking a reference to it.  Returns NULL when no live
 * matching entry exists.
 */
static struct mlx5e_decap_entry *
mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
		uintptr_t hash_key)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_decap_key r_key;
	struct mlx5e_decap_entry *e;

	hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
				   hlist, hash_key) {
		r_key = e->key;
		/* Entries with zero refcount are dying; skip them. */
		if (!cmp_decap_info(&r_key, key) &&
		    mlx5e_decap_take(e))
			return e;
	}
	return NULL;
}
6290d9f9647SVlad Buslov 
6300d9f9647SVlad Buslov struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info)
6310d9f9647SVlad Buslov {
6320d9f9647SVlad Buslov 	size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
6330d9f9647SVlad Buslov 
6340d9f9647SVlad Buslov 	return kmemdup(tun_info, tun_size, GFP_KERNEL);
6350d9f9647SVlad Buslov }
6360d9f9647SVlad Buslov 
6370d9f9647SVlad Buslov static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
6380d9f9647SVlad Buslov 				      struct mlx5e_tc_flow *flow,
6390d9f9647SVlad Buslov 				      int out_index,
6400d9f9647SVlad Buslov 				      struct mlx5e_encap_entry *e,
6410d9f9647SVlad Buslov 				      struct netlink_ext_ack *extack)
6420d9f9647SVlad Buslov {
6430d9f9647SVlad Buslov 	int i;
6440d9f9647SVlad Buslov 
6450d9f9647SVlad Buslov 	for (i = 0; i < out_index; i++) {
6460d9f9647SVlad Buslov 		if (flow->encaps[i].e != e)
6470d9f9647SVlad Buslov 			continue;
6480d9f9647SVlad Buslov 		NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
6490d9f9647SVlad Buslov 		netdev_err(priv->netdev, "can't duplicate encap action\n");
6500d9f9647SVlad Buslov 		return true;
6510d9f9647SVlad Buslov 	}
6520d9f9647SVlad Buslov 
6530d9f9647SVlad Buslov 	return false;
6540d9f9647SVlad Buslov }
6550d9f9647SVlad Buslov 
6560d9f9647SVlad Buslov static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw,
6570d9f9647SVlad Buslov 			       struct mlx5_flow_attr *attr,
6580d9f9647SVlad Buslov 			       struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
6590d9f9647SVlad Buslov 			       struct net_device *out_dev,
6600d9f9647SVlad Buslov 			       int route_dev_ifindex,
6610d9f9647SVlad Buslov 			       int out_index)
6620d9f9647SVlad Buslov {
6630d9f9647SVlad Buslov 	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
6640d9f9647SVlad Buslov 	struct net_device *route_dev;
6650d9f9647SVlad Buslov 	u16 vport_num;
6660d9f9647SVlad Buslov 	int err = 0;
6670d9f9647SVlad Buslov 	u32 data;
6680d9f9647SVlad Buslov 
6690d9f9647SVlad Buslov 	route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex);
6700d9f9647SVlad Buslov 
6710d9f9647SVlad Buslov 	if (!route_dev || route_dev->netdev_ops != &mlx5e_netdev_ops ||
6720d9f9647SVlad Buslov 	    !mlx5e_tc_is_vf_tunnel(out_dev, route_dev))
6730d9f9647SVlad Buslov 		goto out;
6740d9f9647SVlad Buslov 
6750d9f9647SVlad Buslov 	err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num);
6760d9f9647SVlad Buslov 	if (err)
6770d9f9647SVlad Buslov 		goto out;
6780d9f9647SVlad Buslov 
6790d9f9647SVlad Buslov 	attr->dest_chain = 0;
6800d9f9647SVlad Buslov 	attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
6810d9f9647SVlad Buslov 	esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
6820d9f9647SVlad Buslov 	data = mlx5_eswitch_get_vport_metadata_for_set(esw_attr->in_mdev->priv.eswitch,
6830d9f9647SVlad Buslov 						       vport_num);
6848914add2SVlad Buslov 	err = mlx5e_tc_match_to_reg_set_and_get_id(esw->dev, mod_hdr_acts,
6858914add2SVlad Buslov 						   MLX5_FLOW_NAMESPACE_FDB,
6868914add2SVlad Buslov 						   VPORT_TO_REG, data);
6878914add2SVlad Buslov 	if (err >= 0) {
6888914add2SVlad Buslov 		esw_attr->dests[out_index].src_port_rewrite_act_id = err;
6898914add2SVlad Buslov 		err = 0;
6908914add2SVlad Buslov 	}
6910d9f9647SVlad Buslov 
6920d9f9647SVlad Buslov out:
6930d9f9647SVlad Buslov 	if (route_dev)
6940d9f9647SVlad Buslov 		dev_put(route_dev);
6950d9f9647SVlad Buslov 	return err;
6960d9f9647SVlad Buslov }
6970d9f9647SVlad Buslov 
6988914add2SVlad Buslov static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw,
6998914add2SVlad Buslov 				  struct mlx5_esw_flow_attr *attr,
7008914add2SVlad Buslov 				  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
7018914add2SVlad Buslov 				  struct net_device *out_dev,
7028914add2SVlad Buslov 				  int route_dev_ifindex,
7038914add2SVlad Buslov 				  int out_index)
7048914add2SVlad Buslov {
7058914add2SVlad Buslov 	int act_id = attr->dests[out_index].src_port_rewrite_act_id;
7068914add2SVlad Buslov 	struct net_device *route_dev;
7078914add2SVlad Buslov 	u16 vport_num;
7088914add2SVlad Buslov 	int err = 0;
7098914add2SVlad Buslov 	u32 data;
7108914add2SVlad Buslov 
7118914add2SVlad Buslov 	route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex);
7128914add2SVlad Buslov 
7138914add2SVlad Buslov 	if (!route_dev || route_dev->netdev_ops != &mlx5e_netdev_ops ||
7148914add2SVlad Buslov 	    !mlx5e_tc_is_vf_tunnel(out_dev, route_dev)) {
7158914add2SVlad Buslov 		err = -ENODEV;
7168914add2SVlad Buslov 		goto out;
7178914add2SVlad Buslov 	}
7188914add2SVlad Buslov 
7198914add2SVlad Buslov 	err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num);
7208914add2SVlad Buslov 	if (err)
7218914add2SVlad Buslov 		goto out;
7228914add2SVlad Buslov 
7238914add2SVlad Buslov 	data = mlx5_eswitch_get_vport_metadata_for_set(attr->in_mdev->priv.eswitch,
7248914add2SVlad Buslov 						       vport_num);
7258914add2SVlad Buslov 	mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, VPORT_TO_REG, act_id, data);
7268914add2SVlad Buslov 
7278914add2SVlad Buslov out:
7288914add2SVlad Buslov 	if (route_dev)
7298914add2SVlad Buslov 		dev_put(route_dev);
7308914add2SVlad Buslov 	return err;
7318914add2SVlad Buslov }
7328914add2SVlad Buslov 
7338914add2SVlad Buslov static unsigned int mlx5e_route_tbl_get_last_update(struct mlx5e_priv *priv)
7348914add2SVlad Buslov {
7358914add2SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
7368914add2SVlad Buslov 	struct mlx5_rep_uplink_priv *uplink_priv;
7378914add2SVlad Buslov 	struct mlx5e_rep_priv *uplink_rpriv;
7388914add2SVlad Buslov 	struct mlx5e_tc_tun_encap *encap;
7398914add2SVlad Buslov 	unsigned int ret;
7408914add2SVlad Buslov 
7418914add2SVlad Buslov 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
7428914add2SVlad Buslov 	uplink_priv = &uplink_rpriv->uplink_priv;
7438914add2SVlad Buslov 	encap = uplink_priv->encap;
7448914add2SVlad Buslov 
7458914add2SVlad Buslov 	spin_lock_bh(&encap->route_lock);
7468914add2SVlad Buslov 	ret = encap->route_tbl_last_update;
7478914add2SVlad Buslov 	spin_unlock_bh(&encap->route_lock);
7488914add2SVlad Buslov 	return ret;
7498914add2SVlad Buslov }
7508914add2SVlad Buslov 
751777bb800SVlad Buslov static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
752777bb800SVlad Buslov 				    struct mlx5e_tc_flow *flow,
753c118ebc9SRoi Dayan 				    struct mlx5_flow_attr *attr,
754777bb800SVlad Buslov 				    struct mlx5e_encap_entry *e,
755777bb800SVlad Buslov 				    bool new_encap_entry,
7568914add2SVlad Buslov 				    unsigned long tbl_time_before,
757777bb800SVlad Buslov 				    int out_index);
758777bb800SVlad Buslov 
7590d9f9647SVlad Buslov int mlx5e_attach_encap(struct mlx5e_priv *priv,
7600d9f9647SVlad Buslov 		       struct mlx5e_tc_flow *flow,
761c118ebc9SRoi Dayan 		       struct mlx5_flow_attr *attr,
7620d9f9647SVlad Buslov 		       struct net_device *mirred_dev,
7630d9f9647SVlad Buslov 		       int out_index,
7640d9f9647SVlad Buslov 		       struct netlink_ext_ack *extack,
7650d9f9647SVlad Buslov 		       struct net_device **encap_dev,
7660d9f9647SVlad Buslov 		       bool *encap_valid)
7670d9f9647SVlad Buslov {
7680d9f9647SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
7690d9f9647SVlad Buslov 	struct mlx5e_tc_flow_parse_attr *parse_attr;
7700d9f9647SVlad Buslov 	const struct ip_tunnel_info *tun_info;
7718914add2SVlad Buslov 	unsigned long tbl_time_before = 0;
7720d9f9647SVlad Buslov 	struct mlx5e_encap_entry *e;
773929a2fadSDima Chumak 	struct mlx5e_encap_key key;
774777bb800SVlad Buslov 	bool entry_created = false;
7750d9f9647SVlad Buslov 	unsigned short family;
7760d9f9647SVlad Buslov 	uintptr_t hash_key;
7770d9f9647SVlad Buslov 	int err = 0;
7780d9f9647SVlad Buslov 
7790d9f9647SVlad Buslov 	parse_attr = attr->parse_attr;
7800d9f9647SVlad Buslov 	tun_info = parse_attr->tun_info[out_index];
7810d9f9647SVlad Buslov 	family = ip_tunnel_info_af(tun_info);
7820d9f9647SVlad Buslov 	key.ip_tun_key = &tun_info->key;
7830d9f9647SVlad Buslov 	key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
7840d9f9647SVlad Buslov 	if (!key.tc_tunnel) {
7850d9f9647SVlad Buslov 		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
7860d9f9647SVlad Buslov 		return -EOPNOTSUPP;
7870d9f9647SVlad Buslov 	}
7880d9f9647SVlad Buslov 
7890d9f9647SVlad Buslov 	hash_key = hash_encap_info(&key);
7900d9f9647SVlad Buslov 
7910d9f9647SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
7920d9f9647SVlad Buslov 	e = mlx5e_encap_get(priv, &key, hash_key);
7930d9f9647SVlad Buslov 
7940d9f9647SVlad Buslov 	/* must verify if encap is valid or not */
7950d9f9647SVlad Buslov 	if (e) {
7960d9f9647SVlad Buslov 		/* Check that entry was not already attached to this flow */
7970d9f9647SVlad Buslov 		if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
7980d9f9647SVlad Buslov 			err = -EOPNOTSUPP;
7990d9f9647SVlad Buslov 			goto out_err;
8000d9f9647SVlad Buslov 		}
8010d9f9647SVlad Buslov 
8020d9f9647SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
8030d9f9647SVlad Buslov 		wait_for_completion(&e->res_ready);
8040d9f9647SVlad Buslov 
8050d9f9647SVlad Buslov 		/* Protect against concurrent neigh update. */
8060d9f9647SVlad Buslov 		mutex_lock(&esw->offloads.encap_tbl_lock);
8070d9f9647SVlad Buslov 		if (e->compl_result < 0) {
8080d9f9647SVlad Buslov 			err = -EREMOTEIO;
8090d9f9647SVlad Buslov 			goto out_err;
8100d9f9647SVlad Buslov 		}
8110d9f9647SVlad Buslov 		goto attach_flow;
8120d9f9647SVlad Buslov 	}
8130d9f9647SVlad Buslov 
8140d9f9647SVlad Buslov 	e = kzalloc(sizeof(*e), GFP_KERNEL);
8150d9f9647SVlad Buslov 	if (!e) {
8160d9f9647SVlad Buslov 		err = -ENOMEM;
8170d9f9647SVlad Buslov 		goto out_err;
8180d9f9647SVlad Buslov 	}
8190d9f9647SVlad Buslov 
8200d9f9647SVlad Buslov 	refcount_set(&e->refcnt, 1);
8210d9f9647SVlad Buslov 	init_completion(&e->res_ready);
822777bb800SVlad Buslov 	entry_created = true;
823777bb800SVlad Buslov 	INIT_LIST_HEAD(&e->route_list);
8240d9f9647SVlad Buslov 
8250d9f9647SVlad Buslov 	tun_info = mlx5e_dup_tun_info(tun_info);
8260d9f9647SVlad Buslov 	if (!tun_info) {
8270d9f9647SVlad Buslov 		err = -ENOMEM;
8280d9f9647SVlad Buslov 		goto out_err_init;
8290d9f9647SVlad Buslov 	}
8300d9f9647SVlad Buslov 	e->tun_info = tun_info;
8310d9f9647SVlad Buslov 	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
8320d9f9647SVlad Buslov 	if (err)
8330d9f9647SVlad Buslov 		goto out_err_init;
8340d9f9647SVlad Buslov 
8350d9f9647SVlad Buslov 	INIT_LIST_HEAD(&e->flows);
8360d9f9647SVlad Buslov 	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
8378914add2SVlad Buslov 	tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
8380d9f9647SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
8390d9f9647SVlad Buslov 
8400d9f9647SVlad Buslov 	if (family == AF_INET)
8410d9f9647SVlad Buslov 		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
8420d9f9647SVlad Buslov 	else if (family == AF_INET6)
8430d9f9647SVlad Buslov 		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
8440d9f9647SVlad Buslov 
8450d9f9647SVlad Buslov 	/* Protect against concurrent neigh update. */
8460d9f9647SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
8470d9f9647SVlad Buslov 	complete_all(&e->res_ready);
8480d9f9647SVlad Buslov 	if (err) {
8490d9f9647SVlad Buslov 		e->compl_result = err;
8500d9f9647SVlad Buslov 		goto out_err;
8510d9f9647SVlad Buslov 	}
8520d9f9647SVlad Buslov 	e->compl_result = 1;
8530d9f9647SVlad Buslov 
8540d9f9647SVlad Buslov attach_flow:
855c118ebc9SRoi Dayan 	err = mlx5e_attach_encap_route(priv, flow, attr, e, entry_created,
856c118ebc9SRoi Dayan 				       tbl_time_before, out_index);
8570d9f9647SVlad Buslov 	if (err)
8580d9f9647SVlad Buslov 		goto out_err;
8590d9f9647SVlad Buslov 
860100ad4e2SAriel Levkovich 	err = mlx5e_set_int_port_tunnel(priv, attr, e, out_index);
861100ad4e2SAriel Levkovich 	if (err == -EOPNOTSUPP) {
862100ad4e2SAriel Levkovich 		/* If device doesn't support int port offload,
863100ad4e2SAriel Levkovich 		 * redirect to uplink vport.
864100ad4e2SAriel Levkovich 		 */
865100ad4e2SAriel Levkovich 		mlx5_core_dbg(priv->mdev, "attaching int port as encap dev not supported, using uplink\n");
866100ad4e2SAriel Levkovich 		err = 0;
867100ad4e2SAriel Levkovich 	} else if (err) {
868100ad4e2SAriel Levkovich 		goto out_err;
869100ad4e2SAriel Levkovich 	}
870100ad4e2SAriel Levkovich 
8710d9f9647SVlad Buslov 	flow->encaps[out_index].e = e;
8720d9f9647SVlad Buslov 	list_add(&flow->encaps[out_index].list, &e->flows);
8730d9f9647SVlad Buslov 	flow->encaps[out_index].index = out_index;
8740d9f9647SVlad Buslov 	*encap_dev = e->out_dev;
8750d9f9647SVlad Buslov 	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
8760d9f9647SVlad Buslov 		attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
8770d9f9647SVlad Buslov 		attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
8780d9f9647SVlad Buslov 		*encap_valid = true;
8790d9f9647SVlad Buslov 	} else {
8800d9f9647SVlad Buslov 		*encap_valid = false;
8810d9f9647SVlad Buslov 	}
8820d9f9647SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
8830d9f9647SVlad Buslov 
8840d9f9647SVlad Buslov 	return err;
8850d9f9647SVlad Buslov 
8860d9f9647SVlad Buslov out_err:
8870d9f9647SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
8880d9f9647SVlad Buslov 	if (e)
8890d9f9647SVlad Buslov 		mlx5e_encap_put(priv, e);
8900d9f9647SVlad Buslov 	return err;
8910d9f9647SVlad Buslov 
8920d9f9647SVlad Buslov out_err_init:
8930d9f9647SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
8940d9f9647SVlad Buslov 	kfree(tun_info);
8950d9f9647SVlad Buslov 	kfree(e);
8960d9f9647SVlad Buslov 	return err;
8970d9f9647SVlad Buslov }
8980d9f9647SVlad Buslov 
8990d9f9647SVlad Buslov int mlx5e_attach_decap(struct mlx5e_priv *priv,
9000d9f9647SVlad Buslov 		       struct mlx5e_tc_flow *flow,
9010d9f9647SVlad Buslov 		       struct netlink_ext_ack *extack)
9020d9f9647SVlad Buslov {
9030d9f9647SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
9040d9f9647SVlad Buslov 	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
9053f3f05abSYevgeny Kliteynik 	struct mlx5_pkt_reformat_params reformat_params;
9060d9f9647SVlad Buslov 	struct mlx5e_tc_flow_parse_attr *parse_attr;
9070d9f9647SVlad Buslov 	struct mlx5e_decap_entry *d;
9080d9f9647SVlad Buslov 	struct mlx5e_decap_key key;
9090d9f9647SVlad Buslov 	uintptr_t hash_key;
9100d9f9647SVlad Buslov 	int err = 0;
9110d9f9647SVlad Buslov 
9120d9f9647SVlad Buslov 	parse_attr = flow->attr->parse_attr;
9130d9f9647SVlad Buslov 	if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
9140d9f9647SVlad Buslov 		NL_SET_ERR_MSG_MOD(extack,
9150d9f9647SVlad Buslov 				   "encap header larger than max supported");
9160d9f9647SVlad Buslov 		return -EOPNOTSUPP;
9170d9f9647SVlad Buslov 	}
9180d9f9647SVlad Buslov 
9190d9f9647SVlad Buslov 	key.key = parse_attr->eth;
9200d9f9647SVlad Buslov 	hash_key = hash_decap_info(&key);
9210d9f9647SVlad Buslov 	mutex_lock(&esw->offloads.decap_tbl_lock);
9220d9f9647SVlad Buslov 	d = mlx5e_decap_get(priv, &key, hash_key);
9230d9f9647SVlad Buslov 	if (d) {
9240d9f9647SVlad Buslov 		mutex_unlock(&esw->offloads.decap_tbl_lock);
9250d9f9647SVlad Buslov 		wait_for_completion(&d->res_ready);
9260d9f9647SVlad Buslov 		mutex_lock(&esw->offloads.decap_tbl_lock);
9270d9f9647SVlad Buslov 		if (d->compl_result) {
9280d9f9647SVlad Buslov 			err = -EREMOTEIO;
9290d9f9647SVlad Buslov 			goto out_free;
9300d9f9647SVlad Buslov 		}
9310d9f9647SVlad Buslov 		goto found;
9320d9f9647SVlad Buslov 	}
9330d9f9647SVlad Buslov 
9340d9f9647SVlad Buslov 	d = kzalloc(sizeof(*d), GFP_KERNEL);
9350d9f9647SVlad Buslov 	if (!d) {
9360d9f9647SVlad Buslov 		err = -ENOMEM;
9370d9f9647SVlad Buslov 		goto out_err;
9380d9f9647SVlad Buslov 	}
9390d9f9647SVlad Buslov 
9400d9f9647SVlad Buslov 	d->key = key;
9410d9f9647SVlad Buslov 	refcount_set(&d->refcnt, 1);
9420d9f9647SVlad Buslov 	init_completion(&d->res_ready);
9430d9f9647SVlad Buslov 	INIT_LIST_HEAD(&d->flows);
9440d9f9647SVlad Buslov 	hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
9450d9f9647SVlad Buslov 	mutex_unlock(&esw->offloads.decap_tbl_lock);
9460d9f9647SVlad Buslov 
9473f3f05abSYevgeny Kliteynik 	memset(&reformat_params, 0, sizeof(reformat_params));
9483f3f05abSYevgeny Kliteynik 	reformat_params.type = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
9493f3f05abSYevgeny Kliteynik 	reformat_params.size = sizeof(parse_attr->eth);
9503f3f05abSYevgeny Kliteynik 	reformat_params.data = &parse_attr->eth;
9510d9f9647SVlad Buslov 	d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
9523f3f05abSYevgeny Kliteynik 						     &reformat_params,
9530d9f9647SVlad Buslov 						     MLX5_FLOW_NAMESPACE_FDB);
9540d9f9647SVlad Buslov 	if (IS_ERR(d->pkt_reformat)) {
9550d9f9647SVlad Buslov 		err = PTR_ERR(d->pkt_reformat);
9560d9f9647SVlad Buslov 		d->compl_result = err;
9570d9f9647SVlad Buslov 	}
9580d9f9647SVlad Buslov 	mutex_lock(&esw->offloads.decap_tbl_lock);
9590d9f9647SVlad Buslov 	complete_all(&d->res_ready);
9600d9f9647SVlad Buslov 	if (err)
9610d9f9647SVlad Buslov 		goto out_free;
9620d9f9647SVlad Buslov 
9630d9f9647SVlad Buslov found:
9640d9f9647SVlad Buslov 	flow->decap_reformat = d;
9650d9f9647SVlad Buslov 	attr->decap_pkt_reformat = d->pkt_reformat;
9660d9f9647SVlad Buslov 	list_add(&flow->l3_to_l2_reformat, &d->flows);
9670d9f9647SVlad Buslov 	mutex_unlock(&esw->offloads.decap_tbl_lock);
9680d9f9647SVlad Buslov 	return 0;
9690d9f9647SVlad Buslov 
9700d9f9647SVlad Buslov out_free:
9710d9f9647SVlad Buslov 	mutex_unlock(&esw->offloads.decap_tbl_lock);
9720d9f9647SVlad Buslov 	mlx5e_decap_put(priv, d);
9730d9f9647SVlad Buslov 	return err;
9740d9f9647SVlad Buslov 
9750d9f9647SVlad Buslov out_err:
9760d9f9647SVlad Buslov 	mutex_unlock(&esw->offloads.decap_tbl_lock);
9770d9f9647SVlad Buslov 	return err;
9780d9f9647SVlad Buslov }
979777bb800SVlad Buslov 
980777bb800SVlad Buslov static int cmp_route_info(struct mlx5e_route_key *a,
981777bb800SVlad Buslov 			  struct mlx5e_route_key *b)
982777bb800SVlad Buslov {
983777bb800SVlad Buslov 	if (a->ip_version == 4 && b->ip_version == 4)
984777bb800SVlad Buslov 		return memcmp(&a->endpoint_ip.v4, &b->endpoint_ip.v4,
985777bb800SVlad Buslov 			      sizeof(a->endpoint_ip.v4));
986777bb800SVlad Buslov 	else if (a->ip_version == 6 && b->ip_version == 6)
987777bb800SVlad Buslov 		return memcmp(&a->endpoint_ip.v6, &b->endpoint_ip.v6,
988777bb800SVlad Buslov 			      sizeof(a->endpoint_ip.v6));
989777bb800SVlad Buslov 	return 1;
990777bb800SVlad Buslov }
991777bb800SVlad Buslov 
992777bb800SVlad Buslov static u32 hash_route_info(struct mlx5e_route_key *key)
993777bb800SVlad Buslov {
994777bb800SVlad Buslov 	if (key->ip_version == 4)
995777bb800SVlad Buslov 		return jhash(&key->endpoint_ip.v4, sizeof(key->endpoint_ip.v4), 0);
996777bb800SVlad Buslov 	return jhash(&key->endpoint_ip.v6, sizeof(key->endpoint_ip.v6), 0);
997777bb800SVlad Buslov }
998777bb800SVlad Buslov 
9998914add2SVlad Buslov static void mlx5e_route_dealloc(struct mlx5e_priv *priv,
10008914add2SVlad Buslov 				struct mlx5e_route_entry *r)
10018914add2SVlad Buslov {
10028914add2SVlad Buslov 	WARN_ON(!list_empty(&r->decap_flows));
10038914add2SVlad Buslov 	WARN_ON(!list_empty(&r->encap_entries));
10048914add2SVlad Buslov 
10058914add2SVlad Buslov 	kfree_rcu(r, rcu);
10068914add2SVlad Buslov }
10078914add2SVlad Buslov 
10088914add2SVlad Buslov static void mlx5e_route_put(struct mlx5e_priv *priv, struct mlx5e_route_entry *r)
1009777bb800SVlad Buslov {
1010777bb800SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
10118914add2SVlad Buslov 
10128914add2SVlad Buslov 	if (!refcount_dec_and_mutex_lock(&r->refcnt, &esw->offloads.encap_tbl_lock))
10138914add2SVlad Buslov 		return;
10148914add2SVlad Buslov 
10158914add2SVlad Buslov 	hash_del_rcu(&r->hlist);
10168914add2SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
10178914add2SVlad Buslov 
10188914add2SVlad Buslov 	mlx5e_route_dealloc(priv, r);
10198914add2SVlad Buslov }
10208914add2SVlad Buslov 
10218914add2SVlad Buslov static void mlx5e_route_put_locked(struct mlx5e_priv *priv, struct mlx5e_route_entry *r)
10228914add2SVlad Buslov {
10238914add2SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
10248914add2SVlad Buslov 
10258914add2SVlad Buslov 	lockdep_assert_held(&esw->offloads.encap_tbl_lock);
10268914add2SVlad Buslov 
10278914add2SVlad Buslov 	if (!refcount_dec_and_test(&r->refcnt))
10288914add2SVlad Buslov 		return;
10298914add2SVlad Buslov 	hash_del_rcu(&r->hlist);
10308914add2SVlad Buslov 	mlx5e_route_dealloc(priv, r);
10318914add2SVlad Buslov }
10328914add2SVlad Buslov 
10338914add2SVlad Buslov static struct mlx5e_route_entry *
10348914add2SVlad Buslov mlx5e_route_get(struct mlx5e_tc_tun_encap *encap, struct mlx5e_route_key *key,
10358914add2SVlad Buslov 		u32 hash_key)
10368914add2SVlad Buslov {
1037777bb800SVlad Buslov 	struct mlx5e_route_key r_key;
1038777bb800SVlad Buslov 	struct mlx5e_route_entry *r;
1039777bb800SVlad Buslov 
10408914add2SVlad Buslov 	hash_for_each_possible(encap->route_tbl, r, hlist, hash_key) {
1041777bb800SVlad Buslov 		r_key = r->key;
1042777bb800SVlad Buslov 		if (!cmp_route_info(&r_key, key) &&
1043777bb800SVlad Buslov 		    refcount_inc_not_zero(&r->refcnt))
1044777bb800SVlad Buslov 			return r;
1045777bb800SVlad Buslov 	}
1046777bb800SVlad Buslov 	return NULL;
1047777bb800SVlad Buslov }
1048777bb800SVlad Buslov 
1049777bb800SVlad Buslov static struct mlx5e_route_entry *
1050777bb800SVlad Buslov mlx5e_route_get_create(struct mlx5e_priv *priv,
10518914add2SVlad Buslov 		       struct mlx5e_route_key *key,
10528914add2SVlad Buslov 		       int tunnel_dev_index,
10538914add2SVlad Buslov 		       unsigned long *route_tbl_change_time)
1054777bb800SVlad Buslov {
1055777bb800SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
10568914add2SVlad Buslov 	struct mlx5_rep_uplink_priv *uplink_priv;
10578914add2SVlad Buslov 	struct mlx5e_rep_priv *uplink_rpriv;
10588914add2SVlad Buslov 	struct mlx5e_tc_tun_encap *encap;
1059777bb800SVlad Buslov 	struct mlx5e_route_entry *r;
1060777bb800SVlad Buslov 	u32 hash_key;
1061777bb800SVlad Buslov 
10628914add2SVlad Buslov 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
10638914add2SVlad Buslov 	uplink_priv = &uplink_rpriv->uplink_priv;
10648914add2SVlad Buslov 	encap = uplink_priv->encap;
10658914add2SVlad Buslov 
1066777bb800SVlad Buslov 	hash_key = hash_route_info(key);
10678914add2SVlad Buslov 	spin_lock_bh(&encap->route_lock);
10688914add2SVlad Buslov 	r = mlx5e_route_get(encap, key, hash_key);
10698914add2SVlad Buslov 	spin_unlock_bh(&encap->route_lock);
10708914add2SVlad Buslov 	if (r) {
10718914add2SVlad Buslov 		if (!mlx5e_route_entry_valid(r)) {
10728914add2SVlad Buslov 			mlx5e_route_put_locked(priv, r);
10738914add2SVlad Buslov 			return ERR_PTR(-EINVAL);
10748914add2SVlad Buslov 		}
1075777bb800SVlad Buslov 		return r;
10768914add2SVlad Buslov 	}
1077777bb800SVlad Buslov 
1078777bb800SVlad Buslov 	r = kzalloc(sizeof(*r), GFP_KERNEL);
1079777bb800SVlad Buslov 	if (!r)
1080777bb800SVlad Buslov 		return ERR_PTR(-ENOMEM);
1081777bb800SVlad Buslov 
1082777bb800SVlad Buslov 	r->key = *key;
10838914add2SVlad Buslov 	r->flags |= MLX5E_ROUTE_ENTRY_VALID;
10848914add2SVlad Buslov 	r->tunnel_dev_index = tunnel_dev_index;
1085777bb800SVlad Buslov 	refcount_set(&r->refcnt, 1);
1086777bb800SVlad Buslov 	INIT_LIST_HEAD(&r->decap_flows);
1087777bb800SVlad Buslov 	INIT_LIST_HEAD(&r->encap_entries);
10888914add2SVlad Buslov 
10898914add2SVlad Buslov 	spin_lock_bh(&encap->route_lock);
10908914add2SVlad Buslov 	*route_tbl_change_time = encap->route_tbl_last_update;
10918914add2SVlad Buslov 	hash_add(encap->route_tbl, &r->hlist, hash_key);
10928914add2SVlad Buslov 	spin_unlock_bh(&encap->route_lock);
10938914add2SVlad Buslov 
1094777bb800SVlad Buslov 	return r;
1095777bb800SVlad Buslov }
1096777bb800SVlad Buslov 
10978914add2SVlad Buslov static struct mlx5e_route_entry *
10988914add2SVlad Buslov mlx5e_route_lookup_for_update(struct mlx5e_tc_tun_encap *encap, struct mlx5e_route_key *key)
10998914add2SVlad Buslov {
11008914add2SVlad Buslov 	u32 hash_key = hash_route_info(key);
11018914add2SVlad Buslov 	struct mlx5e_route_entry *r;
11028914add2SVlad Buslov 
11038914add2SVlad Buslov 	spin_lock_bh(&encap->route_lock);
11048914add2SVlad Buslov 	encap->route_tbl_last_update = jiffies;
11058914add2SVlad Buslov 	r = mlx5e_route_get(encap, key, hash_key);
11068914add2SVlad Buslov 	spin_unlock_bh(&encap->route_lock);
11078914add2SVlad Buslov 
11088914add2SVlad Buslov 	return r;
11098914add2SVlad Buslov }
11108914add2SVlad Buslov 
11118914add2SVlad Buslov struct mlx5e_tc_fib_event_data {
11128914add2SVlad Buslov 	struct work_struct work;
11138914add2SVlad Buslov 	unsigned long event;
11148914add2SVlad Buslov 	struct mlx5e_route_entry *r;
11158914add2SVlad Buslov 	struct net_device *ul_dev;
11168914add2SVlad Buslov };
11178914add2SVlad Buslov 
11188914add2SVlad Buslov static void mlx5e_tc_fib_event_work(struct work_struct *work);
11198914add2SVlad Buslov static struct mlx5e_tc_fib_event_data *
11208914add2SVlad Buslov mlx5e_tc_init_fib_work(unsigned long event, struct net_device *ul_dev, gfp_t flags)
11218914add2SVlad Buslov {
11228914add2SVlad Buslov 	struct mlx5e_tc_fib_event_data *fib_work;
11238914add2SVlad Buslov 
11248914add2SVlad Buslov 	fib_work = kzalloc(sizeof(*fib_work), flags);
11258914add2SVlad Buslov 	if (WARN_ON(!fib_work))
11268914add2SVlad Buslov 		return NULL;
11278914add2SVlad Buslov 
11288914add2SVlad Buslov 	INIT_WORK(&fib_work->work, mlx5e_tc_fib_event_work);
11298914add2SVlad Buslov 	fib_work->event = event;
11308914add2SVlad Buslov 	fib_work->ul_dev = ul_dev;
11318914add2SVlad Buslov 
11328914add2SVlad Buslov 	return fib_work;
11338914add2SVlad Buslov }
11348914add2SVlad Buslov 
11358914add2SVlad Buslov static int
11368914add2SVlad Buslov mlx5e_route_enqueue_update(struct mlx5e_priv *priv,
11378914add2SVlad Buslov 			   struct mlx5e_route_entry *r,
11388914add2SVlad Buslov 			   unsigned long event)
11398914add2SVlad Buslov {
11408914add2SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
11418914add2SVlad Buslov 	struct mlx5e_tc_fib_event_data *fib_work;
11428914add2SVlad Buslov 	struct mlx5e_rep_priv *uplink_rpriv;
11438914add2SVlad Buslov 	struct net_device *ul_dev;
11448914add2SVlad Buslov 
11458914add2SVlad Buslov 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
11468914add2SVlad Buslov 	ul_dev = uplink_rpriv->netdev;
11478914add2SVlad Buslov 
11488914add2SVlad Buslov 	fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_KERNEL);
11498914add2SVlad Buslov 	if (!fib_work)
11508914add2SVlad Buslov 		return -ENOMEM;
11518914add2SVlad Buslov 
11528914add2SVlad Buslov 	dev_hold(ul_dev);
11538914add2SVlad Buslov 	refcount_inc(&r->refcnt);
11548914add2SVlad Buslov 	fib_work->r = r;
11558914add2SVlad Buslov 	queue_work(priv->wq, &fib_work->work);
11568914add2SVlad Buslov 
11578914add2SVlad Buslov 	return 0;
11588914add2SVlad Buslov }
11598914add2SVlad Buslov 
1160777bb800SVlad Buslov int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
1161777bb800SVlad Buslov 			     struct mlx5e_tc_flow *flow)
1162777bb800SVlad Buslov {
1163777bb800SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
11648914add2SVlad Buslov 	unsigned long tbl_time_before, tbl_time_after;
1165777bb800SVlad Buslov 	struct mlx5e_tc_flow_parse_attr *parse_attr;
1166777bb800SVlad Buslov 	struct mlx5_flow_attr *attr = flow->attr;
1167777bb800SVlad Buslov 	struct mlx5_esw_flow_attr *esw_attr;
1168777bb800SVlad Buslov 	struct mlx5e_route_entry *r;
1169777bb800SVlad Buslov 	struct mlx5e_route_key key;
1170777bb800SVlad Buslov 	int err = 0;
1171777bb800SVlad Buslov 
1172777bb800SVlad Buslov 	esw_attr = attr->esw_attr;
1173777bb800SVlad Buslov 	parse_attr = attr->parse_attr;
1174777bb800SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
1175777bb800SVlad Buslov 	if (!esw_attr->rx_tun_attr)
1176777bb800SVlad Buslov 		goto out;
1177777bb800SVlad Buslov 
11788914add2SVlad Buslov 	tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
11798914add2SVlad Buslov 	tbl_time_after = tbl_time_before;
1180819c319cSChris Mi 	err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr, parse_attr->filter_dev);
1181777bb800SVlad Buslov 	if (err || !esw_attr->rx_tun_attr->decap_vport)
1182777bb800SVlad Buslov 		goto out;
1183777bb800SVlad Buslov 
11841e74152eSRoi Dayan 	key.ip_version = attr->tun_ip_version;
1185777bb800SVlad Buslov 	if (key.ip_version == 4)
1186777bb800SVlad Buslov 		key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4;
1187777bb800SVlad Buslov 	else
1188777bb800SVlad Buslov 		key.endpoint_ip.v6 = esw_attr->rx_tun_attr->dst_ip.v6;
1189777bb800SVlad Buslov 
11908914add2SVlad Buslov 	r = mlx5e_route_get_create(priv, &key, parse_attr->filter_dev->ifindex,
11918914add2SVlad Buslov 				   &tbl_time_after);
1192777bb800SVlad Buslov 	if (IS_ERR(r)) {
1193777bb800SVlad Buslov 		err = PTR_ERR(r);
1194777bb800SVlad Buslov 		goto out;
1195777bb800SVlad Buslov 	}
11968914add2SVlad Buslov 	/* Routing changed concurrently. FIB event handler might have missed new
11978914add2SVlad Buslov 	 * entry, schedule update.
11988914add2SVlad Buslov 	 */
11998914add2SVlad Buslov 	if (tbl_time_before != tbl_time_after) {
12008914add2SVlad Buslov 		err = mlx5e_route_enqueue_update(priv, r, FIB_EVENT_ENTRY_REPLACE);
12018914add2SVlad Buslov 		if (err) {
12028914add2SVlad Buslov 			mlx5e_route_put_locked(priv, r);
12038914add2SVlad Buslov 			goto out;
12048914add2SVlad Buslov 		}
12058914add2SVlad Buslov 	}
1206777bb800SVlad Buslov 
1207777bb800SVlad Buslov 	flow->decap_route = r;
1208777bb800SVlad Buslov 	list_add(&flow->decap_routes, &r->decap_flows);
1209777bb800SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
1210777bb800SVlad Buslov 	return 0;
1211777bb800SVlad Buslov 
1212777bb800SVlad Buslov out:
1213777bb800SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
1214777bb800SVlad Buslov 	return err;
1215777bb800SVlad Buslov }
1216777bb800SVlad Buslov 
1217777bb800SVlad Buslov static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
1218777bb800SVlad Buslov 				    struct mlx5e_tc_flow *flow,
1219c118ebc9SRoi Dayan 				    struct mlx5_flow_attr *attr,
1220777bb800SVlad Buslov 				    struct mlx5e_encap_entry *e,
1221777bb800SVlad Buslov 				    bool new_encap_entry,
12228914add2SVlad Buslov 				    unsigned long tbl_time_before,
1223777bb800SVlad Buslov 				    int out_index)
1224777bb800SVlad Buslov {
1225777bb800SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
12268914add2SVlad Buslov 	unsigned long tbl_time_after = tbl_time_before;
1227777bb800SVlad Buslov 	struct mlx5e_tc_flow_parse_attr *parse_attr;
1228777bb800SVlad Buslov 	const struct ip_tunnel_info *tun_info;
1229777bb800SVlad Buslov 	struct mlx5_esw_flow_attr *esw_attr;
1230777bb800SVlad Buslov 	struct mlx5e_route_entry *r;
1231777bb800SVlad Buslov 	struct mlx5e_route_key key;
1232777bb800SVlad Buslov 	unsigned short family;
1233777bb800SVlad Buslov 	int err = 0;
1234777bb800SVlad Buslov 
1235777bb800SVlad Buslov 	esw_attr = attr->esw_attr;
1236777bb800SVlad Buslov 	parse_attr = attr->parse_attr;
1237777bb800SVlad Buslov 	tun_info = parse_attr->tun_info[out_index];
1238777bb800SVlad Buslov 	family = ip_tunnel_info_af(tun_info);
1239777bb800SVlad Buslov 
1240777bb800SVlad Buslov 	if (family == AF_INET) {
1241777bb800SVlad Buslov 		key.endpoint_ip.v4 = tun_info->key.u.ipv4.src;
1242777bb800SVlad Buslov 		key.ip_version = 4;
1243777bb800SVlad Buslov 	} else if (family == AF_INET6) {
1244777bb800SVlad Buslov 		key.endpoint_ip.v6 = tun_info->key.u.ipv6.src;
1245777bb800SVlad Buslov 		key.ip_version = 6;
1246777bb800SVlad Buslov 	}
1247777bb800SVlad Buslov 
1248777bb800SVlad Buslov 	err = mlx5e_set_vf_tunnel(esw, attr, &parse_attr->mod_hdr_acts, e->out_dev,
1249777bb800SVlad Buslov 				  e->route_dev_ifindex, out_index);
1250777bb800SVlad Buslov 	if (err || !(esw_attr->dests[out_index].flags &
1251777bb800SVlad Buslov 		     MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE))
1252777bb800SVlad Buslov 		return err;
1253777bb800SVlad Buslov 
12548914add2SVlad Buslov 	r = mlx5e_route_get_create(priv, &key, parse_attr->mirred_ifindex[out_index],
12558914add2SVlad Buslov 				   &tbl_time_after);
1256777bb800SVlad Buslov 	if (IS_ERR(r))
1257777bb800SVlad Buslov 		return PTR_ERR(r);
12588914add2SVlad Buslov 	/* Routing changed concurrently. FIB event handler might have missed new
12598914add2SVlad Buslov 	 * entry, schedule update.
12608914add2SVlad Buslov 	 */
12618914add2SVlad Buslov 	if (tbl_time_before != tbl_time_after) {
12628914add2SVlad Buslov 		err = mlx5e_route_enqueue_update(priv, r, FIB_EVENT_ENTRY_REPLACE);
12638914add2SVlad Buslov 		if (err) {
12648914add2SVlad Buslov 			mlx5e_route_put_locked(priv, r);
12658914add2SVlad Buslov 			return err;
12668914add2SVlad Buslov 		}
12678914add2SVlad Buslov 	}
1268777bb800SVlad Buslov 
1269777bb800SVlad Buslov 	flow->encap_routes[out_index].r = r;
1270777bb800SVlad Buslov 	if (new_encap_entry)
1271777bb800SVlad Buslov 		list_add(&e->route_list, &r->encap_entries);
1272777bb800SVlad Buslov 	flow->encap_routes[out_index].index = out_index;
1273777bb800SVlad Buslov 	return 0;
1274777bb800SVlad Buslov }
1275777bb800SVlad Buslov 
1276777bb800SVlad Buslov void mlx5e_detach_decap_route(struct mlx5e_priv *priv,
1277777bb800SVlad Buslov 			      struct mlx5e_tc_flow *flow)
1278777bb800SVlad Buslov {
1279777bb800SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1280777bb800SVlad Buslov 	struct mlx5e_route_entry *r = flow->decap_route;
1281777bb800SVlad Buslov 
1282777bb800SVlad Buslov 	if (!r)
1283777bb800SVlad Buslov 		return;
1284777bb800SVlad Buslov 
1285777bb800SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
1286777bb800SVlad Buslov 	list_del(&flow->decap_routes);
1287777bb800SVlad Buslov 	flow->decap_route = NULL;
1288777bb800SVlad Buslov 
1289777bb800SVlad Buslov 	if (!refcount_dec_and_test(&r->refcnt)) {
1290777bb800SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
1291777bb800SVlad Buslov 		return;
1292777bb800SVlad Buslov 	}
1293777bb800SVlad Buslov 	hash_del_rcu(&r->hlist);
1294777bb800SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
1295777bb800SVlad Buslov 
1296777bb800SVlad Buslov 	mlx5e_route_dealloc(priv, r);
1297777bb800SVlad Buslov }
1298777bb800SVlad Buslov 
1299777bb800SVlad Buslov static void mlx5e_detach_encap_route(struct mlx5e_priv *priv,
1300777bb800SVlad Buslov 				     struct mlx5e_tc_flow *flow,
1301777bb800SVlad Buslov 				     int out_index)
1302777bb800SVlad Buslov {
1303777bb800SVlad Buslov 	struct mlx5e_route_entry *r = flow->encap_routes[out_index].r;
1304777bb800SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1305777bb800SVlad Buslov 	struct mlx5e_encap_entry *e, *tmp;
1306777bb800SVlad Buslov 
1307777bb800SVlad Buslov 	if (!r)
1308777bb800SVlad Buslov 		return;
1309777bb800SVlad Buslov 
1310777bb800SVlad Buslov 	mutex_lock(&esw->offloads.encap_tbl_lock);
1311777bb800SVlad Buslov 	flow->encap_routes[out_index].r = NULL;
1312777bb800SVlad Buslov 
1313777bb800SVlad Buslov 	if (!refcount_dec_and_test(&r->refcnt)) {
1314777bb800SVlad Buslov 		mutex_unlock(&esw->offloads.encap_tbl_lock);
1315777bb800SVlad Buslov 		return;
1316777bb800SVlad Buslov 	}
1317777bb800SVlad Buslov 	list_for_each_entry_safe(e, tmp, &r->encap_entries, route_list)
1318777bb800SVlad Buslov 		list_del_init(&e->route_list);
1319777bb800SVlad Buslov 	hash_del_rcu(&r->hlist);
1320777bb800SVlad Buslov 	mutex_unlock(&esw->offloads.encap_tbl_lock);
1321777bb800SVlad Buslov 
1322777bb800SVlad Buslov 	mlx5e_route_dealloc(priv, r);
1323777bb800SVlad Buslov }
1324777bb800SVlad Buslov 
/* Unoffload every flow in @encap_flows and invalidate encap entry @e after
 * its route became unusable.
 *
 * Each offloaded flow is removed from hardware (slow path or FDB rule),
 * its modify-header context is freed and its destination loses
 * MLX5_ESW_DEST_ENCAP_VALID so a later re-offload knows the reformat must
 * be recreated. Finally the entry's packet reformat is released and the
 * entry is marked MLX5_ENCAP_ENTRY_NO_ROUTE.
 *
 * Called from the FIB event work with esw->offloads.encap_tbl_lock held.
 */
static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   struct list_head *encap_flows)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(flow, encap_flows, tmp_list) {
		struct mlx5_flow_attr *attr = flow->attr;
		struct mlx5_esw_flow_attr *esw_attr;

		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		esw_attr = attr->esw_attr;

		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
		mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
		attr->modify_hdr = NULL;

		/* Mark the destination's encap as stale; the reformat was
		 * owned by @e and is released below.
		 */
		esw_attr->dests[flow->tmp_entry_index].flags &=
			~MLX5_ESW_DEST_ENCAP_VALID;
		esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
	}

	e->flags |= MLX5_ENCAP_ENTRY_NO_ROUTE;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
		e->pkt_reformat = NULL;
	}
}
13598914add2SVlad Buslov 
/* Rebuild encap entry @e against @tunnel_dev after a route replace and
 * re-offload every flow in @encap_flows.
 *
 * The encap header is re-resolved first; on failure the warning is logged
 * and processing continues, since flows can still be parked on the slow
 * path while the entry stays invalid. For each flow the VF tunnel source
 * rewrite and modify header are refreshed. A flow is promoted from the
 * slow path to a full FDB rule only when the encap entry is valid, all of
 * the flow's encap destinations are valid and its post-actions offload
 * succeeds; otherwise it is (re)attached to the slow path with its encap
 * destination marked non-valid.
 *
 * Called from the FIB event work with rtnl and
 * esw->offloads.encap_tbl_lock held.
 */
static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
				  struct net_device *tunnel_dev,
				  struct mlx5e_encap_entry *e,
				  struct list_head *encap_flows)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow *flow;
	int err;

	err = ip_tunnel_info_af(e->tun_info) == AF_INET ?
		mlx5e_tc_tun_update_header_ipv4(priv, tunnel_dev, e) :
		mlx5e_tc_tun_update_header_ipv6(priv, tunnel_dev, e);
	if (err)
		mlx5_core_warn(priv->mdev, "Failed to update encap header, %d", err);
	e->flags &= ~MLX5_ENCAP_ENTRY_NO_ROUTE;

	list_for_each_entry(flow, encap_flows, tmp_list) {
		struct mlx5e_tc_flow_parse_attr *parse_attr;
		struct mlx5_esw_flow_attr *esw_attr;
		struct mlx5_flow_handle *rule;
		struct mlx5_flow_attr *attr;
		struct mlx5_flow_spec *spec;

		if (flow_flag_test(flow, FAILED))
			continue;

		spec = &flow->attr->parse_attr->spec;

		/* The attr carrying the encap destination may differ from
		 * flow->attr (see mlx5e_tc_get_encap_attr()).
		 */
		attr = mlx5e_tc_get_encap_attr(flow);
		esw_attr = attr->esw_attr;
		parse_attr = attr->parse_attr;

		err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts,
					     e->out_dev, e->route_dev_ifindex,
					     flow->tmp_entry_index);
		if (err) {
			/* Best effort: skip this flow, keep processing the rest. */
			mlx5_core_warn(priv->mdev, "Failed to update VF tunnel err=%d", err);
			continue;
		}

		err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
		if (err) {
			mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d",
				       err);
			continue;
		}

		if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
			esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
			esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
			/* Full FDB offload requires every encap dest of the
			 * flow to be valid, not just this one.
			 */
			if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
				goto offload_to_slow_path;

			err = mlx5e_tc_offload_flow_post_acts(flow);
			if (err) {
				mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
					       err);
				goto offload_to_slow_path;
			}

			/* update from slow path rule to encap rule */
			rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
			if (IS_ERR(rule)) {
				mlx5e_tc_unoffload_flow_post_acts(flow);
				err = PTR_ERR(rule);
				mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
					       err);
			} else {
				flow->rule[0] = rule;
			}
		} else {
offload_to_slow_path:
			rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
			/* mark the flow's encap dest as non-valid */
			esw_attr->dests[flow->tmp_entry_index].flags &=
				~MLX5_ESW_DEST_ENCAP_VALID;

			if (IS_ERR(rule)) {
				err = PTR_ERR(rule);
				mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
					       err);
			} else {
				flow->rule[0] = rule;
			}
		}
		flow_flag_set(flow, OFFLOADED);
	}
}
14488914add2SVlad Buslov 
14498914add2SVlad Buslov static int mlx5e_update_route_encaps(struct mlx5e_priv *priv,
14508914add2SVlad Buslov 				     struct mlx5e_route_entry *r,
14518914add2SVlad Buslov 				     struct list_head *flow_list,
14528914add2SVlad Buslov 				     bool replace)
14538914add2SVlad Buslov {
14548914add2SVlad Buslov 	struct net_device *tunnel_dev;
14558914add2SVlad Buslov 	struct mlx5e_encap_entry *e;
14568914add2SVlad Buslov 
14578914add2SVlad Buslov 	tunnel_dev = __dev_get_by_index(dev_net(priv->netdev), r->tunnel_dev_index);
14588914add2SVlad Buslov 	if (!tunnel_dev)
14598914add2SVlad Buslov 		return -ENODEV;
14608914add2SVlad Buslov 
14618914add2SVlad Buslov 	list_for_each_entry(e, &r->encap_entries, route_list) {
14628914add2SVlad Buslov 		LIST_HEAD(encap_flows);
14638914add2SVlad Buslov 
14648914add2SVlad Buslov 		mlx5e_take_all_encap_flows(e, &encap_flows);
14658914add2SVlad Buslov 		if (list_empty(&encap_flows))
14668914add2SVlad Buslov 			continue;
14678914add2SVlad Buslov 
14688914add2SVlad Buslov 		if (mlx5e_route_entry_valid(r))
14698914add2SVlad Buslov 			mlx5e_invalidate_encap(priv, e, &encap_flows);
14708914add2SVlad Buslov 
14718914add2SVlad Buslov 		if (!replace) {
14728914add2SVlad Buslov 			list_splice(&encap_flows, flow_list);
14738914add2SVlad Buslov 			continue;
14748914add2SVlad Buslov 		}
14758914add2SVlad Buslov 
14768914add2SVlad Buslov 		mlx5e_reoffload_encap(priv, tunnel_dev, e, &encap_flows);
14778914add2SVlad Buslov 		list_splice(&encap_flows, flow_list);
14788914add2SVlad Buslov 	}
14798914add2SVlad Buslov 
14808914add2SVlad Buslov 	return 0;
14818914add2SVlad Buslov }
14828914add2SVlad Buslov 
14838914add2SVlad Buslov static void mlx5e_unoffload_flow_list(struct mlx5e_priv *priv,
14848914add2SVlad Buslov 				      struct list_head *flow_list)
14858914add2SVlad Buslov {
14868914add2SVlad Buslov 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
14878914add2SVlad Buslov 	struct mlx5e_tc_flow *flow;
14888914add2SVlad Buslov 
14898914add2SVlad Buslov 	list_for_each_entry(flow, flow_list, tmp_list)
14908914add2SVlad Buslov 		if (mlx5e_is_offloaded_flow(flow))
14918914add2SVlad Buslov 			mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
14928914add2SVlad Buslov }
14938914add2SVlad Buslov 
/* Re-offload decap flows after their route was replaced: redo the tunnel
 * route lookup for each flow and re-install its FDB rule.
 *
 * Errors are logged and the affected flow is skipped (left unoffloaded);
 * the remaining flows are still processed.
 *
 * Called from the FIB event work with rtnl and
 * esw->offloads.encap_tbl_lock held.
 */
static void mlx5e_reoffload_decap(struct mlx5e_priv *priv,
				  struct list_head *decap_flows)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(flow, decap_flows, tmp_list) {
		struct mlx5e_tc_flow_parse_attr *parse_attr;
		struct mlx5_flow_attr *attr = flow->attr;
		struct mlx5_flow_handle *rule;
		struct mlx5_flow_spec *spec;
		int err;

		if (flow_flag_test(flow, FAILED))
			continue;

		parse_attr = attr->parse_attr;
		spec = &parse_attr->spec;
		/* Re-resolve the tunnel route against the new FIB state. */
		err = mlx5e_tc_tun_route_lookup(priv, spec, attr, parse_attr->filter_dev);
		if (err) {
			mlx5_core_warn(priv->mdev, "Failed to lookup route for flow, %d\n",
				       err);
			continue;
		}

		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached decap flow, %d\n",
				       err);
		} else {
			flow->rule[0] = rule;
			flow_flag_set(flow, OFFLOADED);
		}
	}
}
15308914add2SVlad Buslov 
15318914add2SVlad Buslov static int mlx5e_update_route_decap_flows(struct mlx5e_priv *priv,
15328914add2SVlad Buslov 					  struct mlx5e_route_entry *r,
15338914add2SVlad Buslov 					  struct list_head *flow_list,
15348914add2SVlad Buslov 					  bool replace)
15358914add2SVlad Buslov {
15368914add2SVlad Buslov 	struct net_device *tunnel_dev;
15378914add2SVlad Buslov 	LIST_HEAD(decap_flows);
15388914add2SVlad Buslov 
15398914add2SVlad Buslov 	tunnel_dev = __dev_get_by_index(dev_net(priv->netdev), r->tunnel_dev_index);
15408914add2SVlad Buslov 	if (!tunnel_dev)
15418914add2SVlad Buslov 		return -ENODEV;
15428914add2SVlad Buslov 
15438914add2SVlad Buslov 	mlx5e_take_all_route_decap_flows(r, &decap_flows);
15448914add2SVlad Buslov 	if (mlx5e_route_entry_valid(r))
15458914add2SVlad Buslov 		mlx5e_unoffload_flow_list(priv, &decap_flows);
15468914add2SVlad Buslov 	if (replace)
15478914add2SVlad Buslov 		mlx5e_reoffload_decap(priv, &decap_flows);
15488914add2SVlad Buslov 
15498914add2SVlad Buslov 	list_splice(&decap_flows, flow_list);
15508914add2SVlad Buslov 
15518914add2SVlad Buslov 	return 0;
15528914add2SVlad Buslov }
15538914add2SVlad Buslov 
/* Deferred handler for FIB events queued by mlx5e_tc_tun_fib_event().
 *
 * Under rtnl (to sync with concurrent neigh updates) and encap_tbl_lock,
 * it unoffloads all encap and decap flows that depend on the route entry
 * and, for FIB_EVENT_ENTRY_REPLACE, re-offloads them and marks the route
 * valid again. The references taken when the work was queued (route entry
 * and uplink netdev) are dropped at the end, along with the event data.
 */
static void mlx5e_tc_fib_event_work(struct work_struct *work)
{
	struct mlx5e_tc_fib_event_data *event_data =
		container_of(work, struct mlx5e_tc_fib_event_data, work);
	struct net_device *ul_dev = event_data->ul_dev;
	struct mlx5e_priv *priv = netdev_priv(ul_dev);
	struct mlx5e_route_entry *r = event_data->r;
	struct mlx5_eswitch *esw;
	LIST_HEAD(flow_list);
	bool replace;
	int err;

	/* sync with concurrent neigh updates */
	rtnl_lock();
	esw = priv->mdev->priv.eswitch;
	mutex_lock(&esw->offloads.encap_tbl_lock);
	replace = event_data->event == FIB_EVENT_ENTRY_REPLACE;

	/* Nothing to invalidate and nothing to re-offload. */
	if (!mlx5e_route_entry_valid(r) && !replace)
		goto out;

	err = mlx5e_update_route_encaps(priv, r, &flow_list, replace);
	if (err)
		mlx5_core_warn(priv->mdev, "Failed to update route encaps, %d\n",
			       err);

	err = mlx5e_update_route_decap_flows(priv, r, &flow_list, replace);
	if (err)
		mlx5_core_warn(priv->mdev, "Failed to update route decap flows, %d\n",
			       err);

	if (replace)
		r->flags |= MLX5E_ROUTE_ENTRY_VALID;
out:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	rtnl_unlock();

	/* Release flows taken by the update helpers and the references
	 * acquired when this work was queued.
	 */
	mlx5e_put_flow_list(priv, &flow_list);
	mlx5e_route_put(priv, event_data->r);
	dev_put(event_data->ul_dev);
	kfree(event_data);
}
15968914add2SVlad Buslov 
/* Prepare deferred work for an IPv4 FIB event. Runs in atomic notifier
 * context, so it must not sleep.
 *
 * Only /32 routes whose nexthop device is an mlx5e netdev are tracked;
 * routes backed by a nexthop object (fi->nh) are rejected before
 * fib_info_nh() is used.
 *
 * Returns NULL when the event is not relevant or no matching route entry
 * exists, ERR_PTR(-ENOMEM) on allocation failure, or an initialized work
 * item holding references to the route entry and @ul_dev.
 */
static struct mlx5e_tc_fib_event_data *
mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
			 struct net_device *ul_dev,
			 struct mlx5e_tc_tun_encap *encap,
			 unsigned long event,
			 struct fib_notifier_info *info)
{
	struct fib_entry_notifier_info *fen_info;
	struct mlx5e_tc_fib_event_data *fib_work;
	struct mlx5e_route_entry *r;
	struct mlx5e_route_key key;
	struct net_device *fib_dev;

	fen_info = container_of(info, struct fib_entry_notifier_info, info);
	if (fen_info->fi->nh)
		return NULL;
	fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
	if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops ||
	    fen_info->dst_len != 32)
		return NULL;

	fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_ATOMIC);
	if (!fib_work)
		return ERR_PTR(-ENOMEM);

	key.endpoint_ip.v4 = htonl(fen_info->dst);
	key.ip_version = 4;

	/* Can't fail after this point because releasing reference to r
	 * requires obtaining sleeping mutex which we can't do in atomic
	 * context.
	 */
	r = mlx5e_route_lookup_for_update(encap, &key);
	if (!r)
		goto out;
	fib_work->r = r;
	dev_hold(ul_dev);

	return fib_work;

out:
	kfree(fib_work);
	return NULL;
}
16418914add2SVlad Buslov 
16428914add2SVlad Buslov static struct mlx5e_tc_fib_event_data *
16438914add2SVlad Buslov mlx5e_init_fib_work_ipv6(struct mlx5e_priv *priv,
16448914add2SVlad Buslov 			 struct net_device *ul_dev,
16458914add2SVlad Buslov 			 struct mlx5e_tc_tun_encap *encap,
16468914add2SVlad Buslov 			 unsigned long event,
16478914add2SVlad Buslov 			 struct fib_notifier_info *info)
16488914add2SVlad Buslov {
16498914add2SVlad Buslov 	struct fib6_entry_notifier_info *fen_info;
16508914add2SVlad Buslov 	struct mlx5e_tc_fib_event_data *fib_work;
16518914add2SVlad Buslov 	struct mlx5e_route_entry *r;
16528914add2SVlad Buslov 	struct mlx5e_route_key key;
16538914add2SVlad Buslov 	struct net_device *fib_dev;
16548914add2SVlad Buslov 
16558914add2SVlad Buslov 	fen_info = container_of(info, struct fib6_entry_notifier_info, info);
16568914add2SVlad Buslov 	fib_dev = fib6_info_nh_dev(fen_info->rt);
16578914add2SVlad Buslov 	if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
16588914add2SVlad Buslov 	    fen_info->rt->fib6_dst.plen != 128)
16598914add2SVlad Buslov 		return NULL;
16608914add2SVlad Buslov 
16618914add2SVlad Buslov 	fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_ATOMIC);
16628914add2SVlad Buslov 	if (!fib_work)
16638914add2SVlad Buslov 		return ERR_PTR(-ENOMEM);
16648914add2SVlad Buslov 
16658914add2SVlad Buslov 	memcpy(&key.endpoint_ip.v6, &fen_info->rt->fib6_dst.addr,
16668914add2SVlad Buslov 	       sizeof(fen_info->rt->fib6_dst.addr));
16678914add2SVlad Buslov 	key.ip_version = 6;
16688914add2SVlad Buslov 
16698914add2SVlad Buslov 	/* Can't fail after this point because releasing reference to r
16708914add2SVlad Buslov 	 * requires obtaining sleeping mutex which we can't do in atomic
16718914add2SVlad Buslov 	 * context.
16728914add2SVlad Buslov 	 */
16738914add2SVlad Buslov 	r = mlx5e_route_lookup_for_update(encap, &key);
16748914add2SVlad Buslov 	if (!r)
16758914add2SVlad Buslov 		goto out;
16768914add2SVlad Buslov 	fib_work->r = r;
16778914add2SVlad Buslov 	dev_hold(ul_dev);
16788914add2SVlad Buslov 
16798914add2SVlad Buslov 	return fib_work;
16808914add2SVlad Buslov 
16818914add2SVlad Buslov out:
16828914add2SVlad Buslov 	kfree(fib_work);
16838914add2SVlad Buslov 	return NULL;
16848914add2SVlad Buslov }
16858914add2SVlad Buslov 
16868914add2SVlad Buslov static int mlx5e_tc_tun_fib_event(struct notifier_block *nb, unsigned long event, void *ptr)
16878914add2SVlad Buslov {
16888914add2SVlad Buslov 	struct mlx5e_tc_fib_event_data *fib_work;
16898914add2SVlad Buslov 	struct fib_notifier_info *info = ptr;
16908914add2SVlad Buslov 	struct mlx5e_tc_tun_encap *encap;
16918914add2SVlad Buslov 	struct net_device *ul_dev;
16928914add2SVlad Buslov 	struct mlx5e_priv *priv;
16938914add2SVlad Buslov 
16948914add2SVlad Buslov 	encap = container_of(nb, struct mlx5e_tc_tun_encap, fib_nb);
16958914add2SVlad Buslov 	priv = encap->priv;
16968914add2SVlad Buslov 	ul_dev = priv->netdev;
16978914add2SVlad Buslov 	priv = netdev_priv(ul_dev);
16988914add2SVlad Buslov 
16998914add2SVlad Buslov 	switch (event) {
17008914add2SVlad Buslov 	case FIB_EVENT_ENTRY_REPLACE:
17018914add2SVlad Buslov 	case FIB_EVENT_ENTRY_DEL:
17028914add2SVlad Buslov 		if (info->family == AF_INET)
17038914add2SVlad Buslov 			fib_work = mlx5e_init_fib_work_ipv4(priv, ul_dev, encap, event, info);
17048914add2SVlad Buslov 		else if (info->family == AF_INET6)
17058914add2SVlad Buslov 			fib_work = mlx5e_init_fib_work_ipv6(priv, ul_dev, encap, event, info);
17068914add2SVlad Buslov 		else
17078914add2SVlad Buslov 			return NOTIFY_DONE;
17088914add2SVlad Buslov 
17098914add2SVlad Buslov 		if (!IS_ERR_OR_NULL(fib_work)) {
17108914add2SVlad Buslov 			queue_work(priv->wq, &fib_work->work);
17118914add2SVlad Buslov 		} else if (IS_ERR(fib_work)) {
17128914add2SVlad Buslov 			NL_SET_ERR_MSG_MOD(info->extack, "Failed to init fib work");
17138914add2SVlad Buslov 			mlx5_core_warn(priv->mdev, "Failed to init fib work, %ld\n",
17148914add2SVlad Buslov 				       PTR_ERR(fib_work));
17158914add2SVlad Buslov 		}
17168914add2SVlad Buslov 
17178914add2SVlad Buslov 		break;
17188914add2SVlad Buslov 	default:
17198914add2SVlad Buslov 		return NOTIFY_DONE;
17208914add2SVlad Buslov 	}
17218914add2SVlad Buslov 
17228914add2SVlad Buslov 	return NOTIFY_DONE;
17238914add2SVlad Buslov }
17248914add2SVlad Buslov 
17258914add2SVlad Buslov struct mlx5e_tc_tun_encap *mlx5e_tc_tun_init(struct mlx5e_priv *priv)
17268914add2SVlad Buslov {
17278914add2SVlad Buslov 	struct mlx5e_tc_tun_encap *encap;
17288914add2SVlad Buslov 	int err;
17298914add2SVlad Buslov 
17308914add2SVlad Buslov 	encap = kvzalloc(sizeof(*encap), GFP_KERNEL);
17318914add2SVlad Buslov 	if (!encap)
17328914add2SVlad Buslov 		return ERR_PTR(-ENOMEM);
17338914add2SVlad Buslov 
17348914add2SVlad Buslov 	encap->priv = priv;
17358914add2SVlad Buslov 	encap->fib_nb.notifier_call = mlx5e_tc_tun_fib_event;
17368914add2SVlad Buslov 	spin_lock_init(&encap->route_lock);
17378914add2SVlad Buslov 	hash_init(encap->route_tbl);
17388914add2SVlad Buslov 	err = register_fib_notifier(dev_net(priv->netdev), &encap->fib_nb,
17398914add2SVlad Buslov 				    NULL, NULL);
17408914add2SVlad Buslov 	if (err) {
17418914add2SVlad Buslov 		kvfree(encap);
17428914add2SVlad Buslov 		return ERR_PTR(err);
17438914add2SVlad Buslov 	}
17448914add2SVlad Buslov 
17458914add2SVlad Buslov 	return encap;
17468914add2SVlad Buslov }
17478914add2SVlad Buslov 
17488914add2SVlad Buslov void mlx5e_tc_tun_cleanup(struct mlx5e_tc_tun_encap *encap)
17498914add2SVlad Buslov {
17508914add2SVlad Buslov 	if (!encap)
17518914add2SVlad Buslov 		return;
17528914add2SVlad Buslov 
17538914add2SVlad Buslov 	unregister_fib_notifier(dev_net(encap->priv->netdev), &encap->fib_nb);
17548914add2SVlad Buslov 	flush_workqueue(encap->priv->wq); /* flush fib event works */
17558914add2SVlad Buslov 	kvfree(encap);
17568914add2SVlad Buslov }
1757