/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */

#ifndef __MLX5_EN_TC_PRIV_H__
#define __MLX5_EN_TC_PRIV_H__

#include "en_tc.h"

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

#define MLX5E_TC_MAX_SPLITS 1

enum {
	MLX5E_TC_FLOW_FLAG_INGRESS               = MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS                = MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH               = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT                    = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC                   = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED             = MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN               = MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS           = MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW                  = MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP                   = MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY             = MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED               = MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_CT                    = MLX5E_TC_FLOW_BASE + 7,
	MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP        = MLX5E_TC_FLOW_BASE + 8,
	MLX5E_TC_FLOW_FLAG_TUN_RX                = MLX5E_TC_FLOW_BASE + 9,
	MLX5E_TC_FLOW_FLAG_FAILED                = MLX5E_TC_FLOW_BASE + 10,
};

struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
	struct ethhdr eth;
};

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 * An illustrative helper applying this pattern is sketched below
 * struct mlx5e_tc_flow.
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};

struct encap_route_flow_item {
	struct mlx5e_route_entry *r; /* attached route instance */
	int index;
};

struct mlx5e_tc_flow {
	struct rhash_head node;
	struct mlx5e_priv *priv;
	u64 cookie;
	unsigned long flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];

	/* flows sharing the same reformat object - currently mpls decap */
	struct list_head l3_to_l2_reformat;
	struct mlx5e_decap_entry *decap_reformat;

	/* flows sharing same route entry */
	struct list_head decap_routes;
	struct mlx5e_route_entry *decap_route;
	struct encap_route_flow_item encap_routes[MLX5_MAX_FLOW_FWD_VPORTS];

	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer;    /* flows with peer flow */
	struct list_head unready; /* flows not ready to be offloaded (e.g.
				   * due to missing route)
				   */
	struct net_device *orig_dev; /* netdev adding flow first */
	int tmp_entry_index;
	struct list_head tmp_list; /* temporary flow list used by neigh update */
	refcount_t refcnt;
	struct rcu_head rcu_head;
	struct completion init_done;
	int tunnel_id; /* the mapped tunnel id of this flow */
	struct mlx5_flow_attr *attr;
};
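
/* Illustrative sketch of the helper pattern documented above
 * struct encap_flow_item: recover the flow that owns an encaps[] entry from
 * its list_head node. The helper name is hypothetical (not part of the
 * driver API); it only demonstrates the two container_of() steps.
 */
static inline struct mlx5e_tc_flow *
mlx5e_tc_flow_from_encap_node(struct list_head *node)
{
	/* Step 1: list_head item -> helper item */
	struct encap_flow_item *efi =
		container_of(node, struct encap_flow_item, list);

	/* Step 2: helper item -> containing flow, using the stored index */
	return container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
}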

u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer);

struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr);

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);

static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)

static inline bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
					    unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)

static inline void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow,		\
						      MLX5E_TC_FLOW_FLAG_##flag)

static inline bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow,		\
						    MLX5E_TC_FLOW_FLAG_##flag)
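
/* Usage sketch (illustrative helper, not part of the driver): publish a
 * rule handle and then mark the flow OFFLOADED. flow_flag_set() orders the
 * store before the bit becomes visible, and readers that go through
 * flow_flag_test() observe the bit before loading flow fields.
 */
static inline void mlx5e_tc_example_publish_rule(struct mlx5e_tc_flow *flow,
						 struct mlx5_flow_handle *rule)
{
	flow->rule[0] = rule;
	flow_flag_set(flow, OFFLOADED);
}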

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow);
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec);
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow);
void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow);

struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);

#endif /* __MLX5_EN_TC_PRIV_H__ */