1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #ifndef __MLX5_EN_TC_H__
34 #define __MLX5_EN_TC_H__
35 
36 #include <net/pkt_cls.h>
37 #include "en.h"
38 #include "eswitch.h"
39 #include "en/tc_ct.h"
40 #include "en/tc_tun.h"
41 #include "en/tc/int_port.h"
42 #include "en/tc/meter.h"
43 #include "en_rep.h"
44 
45 #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
46 
47 #ifdef CONFIG_MLX5_ESWITCH
48 
/* mlx5_flow_attr is allocated with a namespace-specific part appended
 * (nic_attr or esw_attr, the tail union below); ns_to_attr_sz() picks
 * the total allocation size for the given flow namespace.
 */
#define NIC_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
			  sizeof(struct mlx5_nic_flow_attr))
#define ESW_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
			  sizeof(struct mlx5_esw_flow_attr))
#define ns_to_attr_sz(ns) (((ns) == MLX5_FLOW_NAMESPACE_FDB) ?\
			    ESW_FLOW_ATTR_SZ :\
			    NIC_FLOW_ATTR_SZ)
56 
57 struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
58 int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
59 
struct mlx5e_tc_update_priv {
	/* NOTE(review): appears to be the device a restored packet should be
	 * forwarded to — confirm against the users of this struct.
	 */
	struct net_device *fwd_dev;
};
63 
/* NIC-namespace-specific part of mlx5_flow_attr (tail union member
 * nic_attr below).
 */
struct mlx5_nic_flow_attr {
	u32 flow_tag;
	u32 hairpin_tirn;
	struct mlx5_flow_table *hairpin_ft;
};
69 
/* Attributes common to NIC and eswitch (FDB) offloaded flows.  The tail
 * union carries the namespace-specific attribute; the allocation size
 * for a given namespace is computed with ns_to_attr_sz().
 */
struct mlx5_flow_attr {
	u32 action;
	struct mlx5_fc *counter;
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_ct_attr ct_attr;
	struct mlx5e_sample_attr sample_attr;
	struct mlx5e_meter_attr meter_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	u32 chain;
	u16 prio;
	u32 dest_chain;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_table *dest_ft;
	u8 inner_match_level;
	u8 outer_match_level;
	u8 ip_version;
	u8 tun_ip_version;
	int tunnel_id; /* mapped tunnel id */
	u32 flags; /* MLX5_ATTR_FLAG_* bits */
	u32 exe_aso_type;
	struct list_head list;
	struct mlx5e_post_act_handle *post_act_handle;
	struct {
		/* Indicate whether the parsed flow should be counted for lag mode decision
		 * making
		 */
		bool count;
	} lag;
	struct mlx5_flow_attr *branch_true;
	struct mlx5_flow_attr *branch_false;
	struct mlx5_flow_attr *jumping_attr;
	/* keep this union last */
	union {
		DECLARE_FLEX_ARRAY(struct mlx5_esw_flow_attr, esw_attr);
		DECLARE_FLEX_ARRAY(struct mlx5_nic_flow_attr, nic_attr);
	};
};
107 
/* Flag values carried in mlx5_flow_attr::flags. */
enum {
	MLX5_ATTR_FLAG_VLAN_HANDLED  = BIT(0),
	MLX5_ATTR_FLAG_SLOW_PATH     = BIT(1),
	MLX5_ATTR_FLAG_NO_IN_PORT    = BIT(2),
	MLX5_ATTR_FLAG_SRC_REWRITE   = BIT(3),
	MLX5_ATTR_FLAG_SAMPLE        = BIT(4),
	MLX5_ATTR_FLAG_ACCEPT        = BIT(5),
	MLX5_ATTR_FLAG_CT            = BIT(6),
	MLX5_ATTR_FLAG_TERMINATING   = BIT(7),
	MLX5_ATTR_FLAG_MTU           = BIT(8),
};
119 
120 /* Returns true if any of the flags that require skipping further TC/NF processing are set. */
121 static inline bool
122 mlx5e_tc_attr_flags_skip(u32 attr_flags)
123 {
124 	return attr_flags & (MLX5_ATTR_FLAG_SLOW_PATH | MLX5_ATTR_FLAG_ACCEPT);
125 }
126 
/* Tunnel decap properties of an RX flow.  The addresses and vni are
 * only meaningful when decap_vport is non-zero.
 */
struct mlx5_rx_tun_attr {
	u16 decap_vport;
	union {
		__be32 v4;
		struct in6_addr v6;
	} src_ip; /* Valid if decap_vport is not zero */
	union {
		__be32 v4;
		struct in6_addr v6;
	} dst_ip; /* Valid if decap_vport is not zero */
	u32 vni;
};
139 
140 #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
141 #define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
142 
143 #define MLX5E_TC_MAX_INT_PORT_NUM (8)
144 
145 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
146 
/* Tunnel match fields collected from a flower filter.  IPv4 and IPv6
 * addresses share the anonymous union; presumably enc_control selects
 * which member is valid — confirm against the code filling this in.
 */
struct tunnel_match_key {
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_keyid enc_key_id;
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_ip enc_ip;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};

	int filter_ifindex;
};
159 
/* Key/mask pair for tunnel encapsulation options matching. */
struct tunnel_match_enc_opts {
	struct flow_dissector_key_enc_opts key;
	struct flow_dissector_key_enc_opts mask;
};
164 
165 /* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
166  * Upper TUNNEL_INFO_BITS for general tunnel info.
167  * Lower ENC_OPTS_BITS bits for enc_opts.
168  */
169 #define TUNNEL_INFO_BITS 12
170 #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
171 #define ENC_OPTS_BITS 11
172 #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
173 #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
174 #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
175 
/* Bit numbers for TC offload flow flags; turned into masks by the
 * MLX5_TC_FLAG() macro below.
 */
enum {
	MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
};
184 
185 #define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
186 
187 int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv);
188 void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv);
189 
190 int mlx5e_tc_ht_init(struct rhashtable *tc_ht);
191 void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht);
192 
193 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
194 			   struct flow_cls_offload *f, unsigned long flags);
195 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
196 			struct flow_cls_offload *f, unsigned long flags);
197 
198 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
199 		       struct flow_cls_offload *f, unsigned long flags);
200 
201 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
202 				struct tc_cls_matchall_offload *f);
203 int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
204 			     struct tc_cls_matchall_offload *f);
205 void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
206 			     struct tc_cls_matchall_offload *ma);
207 
208 struct mlx5e_encap_entry;
209 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
210 			      struct mlx5e_encap_entry *e,
211 			      struct list_head *flow_list);
212 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
213 			      struct mlx5e_encap_entry *e,
214 			      struct list_head *flow_list);
215 bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
216 void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);
217 
218 void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
219 void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
220 
221 struct mlx5e_neigh_hash_entry;
222 struct mlx5e_encap_entry *
223 mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
224 			  struct mlx5e_encap_entry *e);
225 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
226 
227 void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
228 
/* Identifiers of metadata register mappings; each value indexes
 * mlx5e_tc_attr_to_reg_mappings[] (and the MLX5_REG_MAPPING_* macros
 * below).
 */
enum mlx5e_tc_attr_to_reg {
	CHAIN_TO_REG,
	VPORT_TO_REG,
	TUNNEL_TO_REG,
	CTSTATE_TO_REG,
	ZONE_TO_REG,
	ZONE_RESTORE_TO_REG,
	MARK_TO_REG,
	LABELS_TO_REG,
	FTEID_TO_REG,
	NIC_CHAIN_TO_REG,
	NIC_ZONE_RESTORE_TO_REG,
	PACKET_COLOR_TO_REG,
};
243 
/* Describes how one mlx5e_tc_attr_to_reg value maps onto a hardware
 * rewrite field and the corresponding match location in the flow spec.
 */
struct mlx5e_tc_attr_to_reg_mapping {
	int mfield; /* rewrite field */
	int moffset; /* bit offset of mfield */
	int mlen; /* bits to rewrite/match */

	int soffset; /* byte offset of spec for match */
};
251 
252 extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
253 
254 #define MLX5_REG_MAPPING_MOFFSET(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].moffset)
255 #define MLX5_REG_MAPPING_MBITS(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].mlen)
256 #define MLX5_REG_MAPPING_MASK(reg_id) (GENMASK(mlx5e_tc_attr_to_reg_mappings[reg_id].mlen - 1, 0))
257 
258 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
259 				    struct net_device *out_dev);
260 
261 int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
262 			      struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
263 			      enum mlx5_flow_namespace_type ns,
264 			      enum mlx5e_tc_attr_to_reg type,
265 			      u32 data);
266 
267 void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
268 					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
269 					  enum mlx5e_tc_attr_to_reg type,
270 					  int act_id, u32 data);
271 
272 void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
273 				 enum mlx5e_tc_attr_to_reg type,
274 				 u32 data,
275 				 u32 mask);
276 
277 void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
278 				     enum mlx5e_tc_attr_to_reg type,
279 				     u32 *data,
280 				     u32 *mask);
281 
282 int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
283 					 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
284 					 enum mlx5_flow_namespace_type ns,
285 					 enum mlx5e_tc_attr_to_reg type,
286 					 u32 data);
287 
288 int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
289 			      struct mlx5e_tc_flow *flow,
290 			      struct mlx5_flow_attr *attr);
291 
292 void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
293 			    struct flow_match_basic *match, bool outer,
294 			    void *headers_c, void *headers_v);
295 
296 int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
297 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
298 
299 int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
300 			    void *cb_priv);
301 
302 struct mlx5_flow_handle *
303 mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
304 			     struct mlx5_flow_spec *spec,
305 			     struct mlx5_flow_attr *attr);
306 void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
307 				  struct mlx5_flow_handle *rule,
308 				  struct mlx5_flow_attr *attr);
309 
310 struct mlx5_flow_handle *
311 mlx5_tc_rule_insert(struct mlx5e_priv *priv,
312 		    struct mlx5_flow_spec *spec,
313 		    struct mlx5_flow_attr *attr);
314 void
315 mlx5_tc_rule_delete(struct mlx5e_priv *priv,
316 		    struct mlx5_flow_handle *rule,
317 		    struct mlx5_flow_attr *attr);
318 
319 bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev);
320 int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev,
321 			       u16 *vport);
322 
323 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
324 				      struct mlx5_flow_attr *attr,
325 				      int ifindex,
326 				      enum mlx5e_tc_int_port_type type,
327 				      u32 *action,
328 				      int out_index);
329 #else /* CONFIG_MLX5_CLS_ACT */
330 static inline int  mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
331 static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
332 static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; }
333 static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {}
334 static inline int
335 mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
336 { return -EOPNOTSUPP; }
337 
338 #endif /* CONFIG_MLX5_CLS_ACT */
339 
340 struct mlx5_flow_attr *mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type);
341 
342 struct mlx5_flow_handle *
343 mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
344 			     struct mlx5_flow_spec *spec,
345 			     struct mlx5_flow_attr *attr);
346 void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
347 				  struct mlx5_flow_handle *rule,
348 				  struct mlx5_flow_attr *attr);
349 
350 #else /* CONFIG_MLX5_ESWITCH */
351 static inline int  mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
352 static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
353 static inline int  mlx5e_tc_num_filters(struct mlx5e_priv *priv,
354 					unsigned long flags)
355 {
356 	return 0;
357 }
358 
359 static inline int
360 mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
361 { return -EOPNOTSUPP; }
362 #endif
363 
364 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
365 struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
366 void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
367 static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
368 {
369 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
370 	u32 chain, reg_b;
371 
372 	reg_b = be32_to_cpu(cqe->ft_metadata);
373 
374 	if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ESW_ZONE_ID_BITS))
375 		return false;
376 
377 	chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
378 	if (chain)
379 		return true;
380 #endif
381 
382 	return false;
383 }
384 
385 bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
386 #else /* CONFIG_MLX5_CLS_ACT */
387 static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
388 static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
389 static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
390 { return false; }
391 static inline bool
392 mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
393 { return true; }
394 #endif
395 
396 #endif /* __MLX5_EN_TC_H__ */
397