1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #ifndef __MLX5_EN_TC_H__
34 #define __MLX5_EN_TC_H__
35
36 #include <net/pkt_cls.h>
37 #include "en.h"
38 #include "eswitch.h"
39 #include "en/tc_ct.h"
40 #include "en/tc_tun.h"
41 #include "en/tc/int_port.h"
42 #include "en/tc/meter.h"
43 #include "en_rep.h"
44
45 #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
46
47 #ifdef CONFIG_MLX5_ESWITCH
48
/* Allocation sizes for a struct mlx5_flow_attr followed in memory by the
 * namespace-specific attr (NIC or eswitch) that lives in the trailing
 * flex-array union of struct mlx5_flow_attr.
 */
#define NIC_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
			  sizeof(struct mlx5_nic_flow_attr))
#define ESW_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
			  sizeof(struct mlx5_esw_flow_attr))
/* Pick the allocation size matching the flow namespace: FDB rules carry the
 * eswitch attr, everything else carries the NIC attr.
 */
#define ns_to_attr_sz(ns) (((ns) == MLX5_FLOW_NAMESPACE_FDB) ?\
			    ESW_FLOW_ATTR_SZ :\
			    NIC_FLOW_ATTR_SZ)
56
57 struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
58 int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
59
/* Per-skb state filled in while restoring TC metadata on a packet that
 * missed in hardware (see mlx5e_tc_update_skb()).
 */
struct mlx5e_tc_update_priv {
	struct net_device *fwd_dev; /* device the skb should be forwarded to, if any */
	bool skb_done;    /* NOTE(review): appears to mean "skb fully handled, stop processing" - confirm against callers */
	bool forward_tx;  /* forward on the TX path rather than RX - TODO confirm */
};
65
/* NIC-namespace (non-eswitch) specific flow attributes; stored in the
 * trailing union of struct mlx5_flow_attr (nic_attr member).
 */
struct mlx5_nic_flow_attr {
	u32 flow_tag;
	u32 hairpin_tirn;                   /* TIR number used for hairpin forwarding */
	struct mlx5_flow_table *hairpin_ft; /* hairpin destination flow table */
};
71
/* Attributes of an offloaded TC flow, shared between the NIC and eswitch
 * (FDB) paths. Always allocated with extra room for the namespace-specific
 * attr in the trailing union (see NIC_FLOW_ATTR_SZ / ESW_FLOW_ATTR_SZ and
 * mlx5_alloc_flow_attr()).
 */
struct mlx5_flow_attr {
	u32 action;                                  /* flow context action bitmask */
	unsigned long tc_act_cookies[TCA_ACT_MAX_PRIO]; /* cookies of the TC actions making up this flow */
	struct mlx5_fc *counter;                     /* flow counter, if stats are requested */
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
	struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
	struct mlx5_ct_attr ct_attr;                 /* connection-tracking state */
	struct mlx5e_sample_attr sample_attr;        /* psample offload state */
	struct mlx5e_meter_attr meter_attr;          /* police/meter offload state */
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	u32 chain;                                   /* TC chain the rule is installed on */
	u16 prio;                                    /* TC priority within the chain */
	u16 tc_act_cookies_count;                    /* number of valid entries in tc_act_cookies[] */
	u32 dest_chain;                              /* chain to jump to, for goto-chain actions */
	struct mlx5_flow_table *ft;
	struct mlx5_flow_table *dest_ft;
	u8 inner_match_level;
	u8 outer_match_level;
	u8 tun_ip_version;
	int tunnel_id; /* mapped tunnel id */
	u32 flags;                                   /* MLX5_ATTR_FLAG_* bits */
	u32 exe_aso_type;
	struct list_head list;
	struct mlx5e_post_act_handle *post_act_handle;
	struct mlx5_flow_attr *branch_true;          /* attr for the true branch of a branching (e.g. police) action */
	struct mlx5_flow_attr *branch_false;         /* attr for the false branch */
	struct mlx5_flow_attr *jumping_attr;
	struct mlx5_flow_handle *act_id_restore_rule;
	/* keep this union last */
	union {
		DECLARE_FLEX_ARRAY(struct mlx5_esw_flow_attr, esw_attr);
		DECLARE_FLEX_ARRAY(struct mlx5_nic_flow_attr, nic_attr);
	};
};
107
/* Flag bits for struct mlx5_flow_attr::flags. */
enum {
	MLX5_ATTR_FLAG_VLAN_HANDLED  = BIT(0),
	MLX5_ATTR_FLAG_SLOW_PATH     = BIT(1), /* rule installed on the slow path; skips further processing */
	MLX5_ATTR_FLAG_NO_IN_PORT    = BIT(2),
	MLX5_ATTR_FLAG_SRC_REWRITE   = BIT(3),
	MLX5_ATTR_FLAG_SAMPLE        = BIT(4),
	MLX5_ATTR_FLAG_ACCEPT        = BIT(5), /* terminal accept; skips further processing */
	MLX5_ATTR_FLAG_CT            = BIT(6),
	MLX5_ATTR_FLAG_TERMINATING   = BIT(7),
	MLX5_ATTR_FLAG_MTU           = BIT(8),
};
119
120 /* Returns true if any of the flags that require skipping further TC/NF processing are set. */
121 static inline bool
mlx5e_tc_attr_flags_skip(u32 attr_flags)122 mlx5e_tc_attr_flags_skip(u32 attr_flags)
123 {
124 return attr_flags & (MLX5_ATTR_FLAG_SLOW_PATH | MLX5_ATTR_FLAG_ACCEPT);
125 }
126
/* Tunnel match attributes recovered for a decapsulated RX packet. */
struct mlx5_rx_tun_attr {
	u16 decap_vport; /* vport the decap rule matched on; zero means no decap */
	union {
		__be32 v4;
		struct in6_addr v6;
	} src_ip; /* Valid if decap_vport is not zero */
	union {
		__be32 v4;
		struct in6_addr v6;
	} dst_ip; /* Valid if decap_vport is not zero */
};
138
/* Width and mask of the chain tag carried in the CQE ft_metadata field
 * (see mlx5e_cqe_regb_chain()).
 */
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)

/* Maximum number of internal ports supported per eswitch. */
#define MLX5E_TC_MAX_INT_PORT_NUM (8)
143
144 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
145
/* Dissected tunnel (encap) match fields of a flower rule, used as the key
 * when mapping tunnel info to a compact tunnel id.
 */
struct tunnel_match_key {
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_keyid enc_key_id;    /* VNI / tunnel key id */
	struct flow_dissector_key_ports enc_tp;        /* outer transport ports */
	struct flow_dissector_key_ip enc_ip;           /* outer IP tos/ttl */
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};

	int filter_ifindex; /* ifindex of the device the filter was added on */
};
158
/* Tunnel encap options (e.g. geneve TLVs) with their mask, mapped
 * separately into the lower ENC_OPTS_BITS of the tunnel id.
 */
struct tunnel_match_enc_opts {
	struct flow_dissector_key_enc_opts key;
	struct flow_dissector_key_enc_opts mask;
};
163
/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
 * Upper TUNNEL_INFO_BITS for general tunnel info.
 * Lower ENC_OPTS_BITS bits for enc_opts.
 */
#define TUNNEL_INFO_BITS 12
#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
#define ENC_OPTS_BITS 11
#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
174
/* Bit positions for the "flags" argument passed around the TC offload API
 * (ingress/egress qualifiers and which offload table the flow targets).
 */
enum {
	MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
};

/* Turn a flag name (e.g. INGRESS) into its bit mask. */
#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
185
186 int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv);
187 void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv);
188
189 int mlx5e_tc_ht_init(struct rhashtable *tc_ht);
190 void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht);
191
192 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
193 struct flow_cls_offload *f, unsigned long flags);
194 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
195 struct flow_cls_offload *f, unsigned long flags);
196
197 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
198 struct flow_cls_offload *f, unsigned long flags);
199 int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
200 struct flow_offload_action *fl_act);
201
202 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
203 struct tc_cls_matchall_offload *f);
204 int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
205 struct tc_cls_matchall_offload *f);
206 void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
207 struct tc_cls_matchall_offload *ma);
208
209 struct mlx5e_encap_entry;
210 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
211 struct mlx5e_encap_entry *e,
212 struct list_head *flow_list);
213 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
214 struct mlx5e_encap_entry *e,
215 struct list_head *flow_list);
216 bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
217 void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);
218
219 void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
220 void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
221
222 struct mlx5e_neigh_hash_entry;
223 struct mlx5e_encap_entry *
224 mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
225 struct mlx5e_encap_entry *e);
226 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
227
228 void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
229
/* Identifiers for the metadata register mappings used to carry TC/CT state
 * (chain, zone, mark, labels, ...) across flow table hops. Each value
 * indexes mlx5e_tc_attr_to_reg_mappings[].
 */
enum mlx5e_tc_attr_to_reg {
	MAPPED_OBJ_TO_REG,
	VPORT_TO_REG,
	TUNNEL_TO_REG,
	CTSTATE_TO_REG,
	ZONE_TO_REG,
	ZONE_RESTORE_TO_REG,
	MARK_TO_REG,
	LABELS_TO_REG,
	FTEID_TO_REG,
	NIC_MAPPED_OBJ_TO_REG,
	NIC_ZONE_RESTORE_TO_REG,
	PACKET_COLOR_TO_REG,
};
244
/* Describes where one enum mlx5e_tc_attr_to_reg value lives: which rewrite
 * field, at what bit offset/width, and where to match it in the flow spec.
 */
struct mlx5e_tc_attr_to_reg_mapping {
	int mfield; /* rewrite field */
	int moffset; /* bit offset of mfield */
	int mlen; /* bits to rewrite/match */

	int soffset; /* byte offset of spec for match */
};

/* Table indexed by enum mlx5e_tc_attr_to_reg; defined in en_tc.c. */
extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];

/* Convenience accessors for a mapping's bit offset, width, and value mask. */
#define MLX5_REG_MAPPING_MOFFSET(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].moffset)
#define MLX5_REG_MAPPING_MBITS(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].mlen)
#define MLX5_REG_MAPPING_MASK(reg_id) (GENMASK(mlx5e_tc_attr_to_reg_mappings[reg_id].mlen - 1, 0))
258
259 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
260 struct net_device *out_dev);
261
262 int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
263 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
264 enum mlx5_flow_namespace_type ns,
265 enum mlx5e_tc_attr_to_reg type,
266 u32 data);
267
268 void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
269 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
270 enum mlx5e_tc_attr_to_reg type,
271 int act_id, u32 data);
272
273 void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
274 enum mlx5e_tc_attr_to_reg type,
275 u32 data,
276 u32 mask);
277
278 void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
279 enum mlx5e_tc_attr_to_reg type,
280 u32 *data,
281 u32 *mask);
282
283 int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
284 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
285 enum mlx5_flow_namespace_type ns,
286 enum mlx5e_tc_attr_to_reg type,
287 u32 data);
288
289 int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
290 struct mlx5e_tc_flow *flow,
291 struct mlx5_flow_attr *attr);
292
293 void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
294 struct mlx5e_tc_flow *flow,
295 struct mlx5_flow_attr *attr);
296
297 void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
298 struct flow_match_basic *match, bool outer,
299 void *headers_c, void *headers_v);
300
301 int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
302 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
303
304 int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
305 void *cb_priv);
306
307 struct mlx5_flow_handle *
308 mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
309 struct mlx5_flow_spec *spec,
310 struct mlx5_flow_attr *attr);
311 void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
312 struct mlx5_flow_handle *rule,
313 struct mlx5_flow_attr *attr);
314
315 struct mlx5_flow_handle *
316 mlx5_tc_rule_insert(struct mlx5e_priv *priv,
317 struct mlx5_flow_spec *spec,
318 struct mlx5_flow_attr *attr);
319 void
320 mlx5_tc_rule_delete(struct mlx5e_priv *priv,
321 struct mlx5_flow_handle *rule,
322 struct mlx5_flow_attr *attr);
323
324 bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev);
325 int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev,
326 u16 *vport);
327
328 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
329 struct mlx5_flow_attr *attr,
330 int ifindex,
331 enum mlx5e_tc_int_port_type type,
332 u32 *action,
333 int out_index);
334 #else /* CONFIG_MLX5_CLS_ACT */
/* Stubs used when TC classifier-action offload is compiled out:
 * init/cleanup become no-ops and the block callback rejects all requests.
 */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; }
static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
342
343 #endif /* CONFIG_MLX5_CLS_ACT */
344
345 struct mlx5_flow_attr *mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type);
346
347 struct mlx5_flow_handle *
348 mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
349 struct mlx5_flow_spec *spec,
350 struct mlx5_flow_attr *attr);
351 void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
352 struct mlx5_flow_handle *rule,
353 struct mlx5_flow_attr *attr);
354
355 #else /* CONFIG_MLX5_ESWITCH */
/* Stubs used when eswitch support is compiled out: no TC flows can exist,
 * so init/cleanup are no-ops, the filter count is zero, and the block
 * callback rejects all requests.
 */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
				       unsigned long flags)
{
	return 0;
}

static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
367 #endif
368
369 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
370 struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
371 void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
mlx5e_cqe_regb_chain(struct mlx5_cqe64 * cqe)372 static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
373 {
374 u32 chain, reg_b;
375
376 reg_b = be32_to_cpu(cqe->ft_metadata);
377
378 if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ESW_ZONE_ID_BITS))
379 return false;
380
381 chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
382 if (chain)
383 return true;
384
385 return false;
386 }
387
388 bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
389 bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
390 struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
391 struct mlx5_tc_ct_priv *ct_priv,
392 u32 zone_restore_id, u32 tunnel_id,
393 struct mlx5e_tc_update_priv *tc_priv);
394 #else /* CONFIG_MLX5_CLS_ACT */
/* Stubs used when TC classifier-action offload is compiled out: no TC table
 * exists, no CQE carries a chain tag, and skbs always continue up the stack.
 */
static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{ return true; }
402 #endif
403
404 int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
405 u64 act_miss_cookie, u32 *act_miss_mapping);
406 void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
407 u32 act_miss_mapping);
408
409 #endif /* __MLX5_EN_TC_H__ */
410