/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31 */ 32 33 #ifndef __MLX5_EN_TC_H__ 34 #define __MLX5_EN_TC_H__ 35 36 #include <net/pkt_cls.h> 37 #include "en.h" 38 #include "eswitch.h" 39 #include "en/tc_ct.h" 40 #include "en/tc_tun.h" 41 #include "en/tc/int_port.h" 42 #include "en_rep.h" 43 44 #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff 45 46 #ifdef CONFIG_MLX5_ESWITCH 47 48 #define NIC_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\ 49 sizeof(struct mlx5_nic_flow_attr)) 50 #define ESW_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\ 51 sizeof(struct mlx5_esw_flow_attr)) 52 #define ns_to_attr_sz(ns) (((ns) == MLX5_FLOW_NAMESPACE_FDB) ?\ 53 ESW_FLOW_ATTR_SZ :\ 54 NIC_FLOW_ATTR_SZ) 55 56 int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags); 57 58 struct mlx5e_tc_update_priv { 59 struct net_device *fwd_dev; 60 }; 61 62 struct mlx5_nic_flow_attr { 63 u32 flow_tag; 64 u32 hairpin_tirn; 65 struct mlx5_flow_table *hairpin_ft; 66 }; 67 68 struct mlx5_flow_attr { 69 u32 action; 70 struct mlx5_fc *counter; 71 struct mlx5_modify_hdr *modify_hdr; 72 struct mlx5_ct_attr ct_attr; 73 struct mlx5e_sample_attr sample_attr; 74 struct mlx5e_tc_flow_parse_attr *parse_attr; 75 u32 chain; 76 u16 prio; 77 u32 dest_chain; 78 struct mlx5_flow_table *ft; 79 struct mlx5_flow_table *dest_ft; 80 u8 inner_match_level; 81 u8 outer_match_level; 82 u8 ip_version; 83 u8 tun_ip_version; 84 int tunnel_id; /* mapped tunnel id */ 85 u32 flags; 86 struct list_head list; 87 struct mlx5e_post_act_handle *post_act_handle; 88 struct { 89 /* Indicate whether the parsed flow should be counted for lag mode decision 90 * making 91 */ 92 bool count; 93 } lag; 94 /* keep this union last */ 95 union { 96 struct mlx5_esw_flow_attr esw_attr[0]; 97 struct mlx5_nic_flow_attr nic_attr[0]; 98 }; 99 }; 100 101 enum { 102 MLX5_ATTR_FLAG_VLAN_HANDLED = BIT(0), 103 MLX5_ATTR_FLAG_SLOW_PATH = BIT(1), 104 MLX5_ATTR_FLAG_NO_IN_PORT = BIT(2), 105 MLX5_ATTR_FLAG_SRC_REWRITE = BIT(3), 106 MLX5_ATTR_FLAG_SAMPLE = BIT(4), 107 MLX5_ATTR_FLAG_ACCEPT = BIT(5), 108 
MLX5_ATTR_FLAG_CT = BIT(6), 109 }; 110 111 /* Returns true if any of the flags that require skipping further TC/NF processing are set. */ 112 static inline bool 113 mlx5e_tc_attr_flags_skip(u32 attr_flags) 114 { 115 return attr_flags & (MLX5_ATTR_FLAG_SLOW_PATH | MLX5_ATTR_FLAG_ACCEPT); 116 } 117 118 struct mlx5_rx_tun_attr { 119 u16 decap_vport; 120 union { 121 __be32 v4; 122 struct in6_addr v6; 123 } src_ip; /* Valid if decap_vport is not zero */ 124 union { 125 __be32 v4; 126 struct in6_addr v6; 127 } dst_ip; /* Valid if decap_vport is not zero */ 128 u32 vni; 129 }; 130 131 #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16 132 #define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0) 133 134 #define MLX5E_TC_MAX_INT_PORT_NUM (8) 135 136 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) 137 138 struct tunnel_match_key { 139 struct flow_dissector_key_control enc_control; 140 struct flow_dissector_key_keyid enc_key_id; 141 struct flow_dissector_key_ports enc_tp; 142 struct flow_dissector_key_ip enc_ip; 143 union { 144 struct flow_dissector_key_ipv4_addrs enc_ipv4; 145 struct flow_dissector_key_ipv6_addrs enc_ipv6; 146 }; 147 148 int filter_ifindex; 149 }; 150 151 struct tunnel_match_enc_opts { 152 struct flow_dissector_key_enc_opts key; 153 struct flow_dissector_key_enc_opts mask; 154 }; 155 156 /* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS. 157 * Upper TUNNEL_INFO_BITS for general tunnel info. 158 * Lower ENC_OPTS_BITS bits for enc_opts. 
159 */ 160 #define TUNNEL_INFO_BITS 12 161 #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0) 162 #define ENC_OPTS_BITS 11 163 #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0) 164 #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS) 165 #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0) 166 167 enum { 168 MLX5E_TC_FLAG_INGRESS_BIT, 169 MLX5E_TC_FLAG_EGRESS_BIT, 170 MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, 171 MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, 172 MLX5E_TC_FLAG_FT_OFFLOAD_BIT, 173 MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT, 174 }; 175 176 #define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT) 177 178 int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv); 179 void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv); 180 181 int mlx5e_tc_ht_init(struct rhashtable *tc_ht); 182 void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht); 183 184 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, 185 struct flow_cls_offload *f, unsigned long flags); 186 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, 187 struct flow_cls_offload *f, unsigned long flags); 188 189 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, 190 struct flow_cls_offload *f, unsigned long flags); 191 192 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, 193 struct tc_cls_matchall_offload *f); 194 int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, 195 struct tc_cls_matchall_offload *f); 196 void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, 197 struct tc_cls_matchall_offload *ma); 198 199 struct mlx5e_encap_entry; 200 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, 201 struct mlx5e_encap_entry *e, 202 struct list_head *flow_list); 203 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, 204 struct mlx5e_encap_entry *e, 205 struct list_head *flow_list); 206 bool mlx5e_encap_take(struct mlx5e_encap_entry *e); 207 void mlx5e_encap_put(struct mlx5e_priv *priv, struct 
mlx5e_encap_entry *e); 208 209 void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list); 210 void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list); 211 212 struct mlx5e_neigh_hash_entry; 213 struct mlx5e_encap_entry * 214 mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe, 215 struct mlx5e_encap_entry *e); 216 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe); 217 218 void mlx5e_tc_reoffload_flows_work(struct work_struct *work); 219 220 enum mlx5e_tc_attr_to_reg { 221 CHAIN_TO_REG, 222 VPORT_TO_REG, 223 TUNNEL_TO_REG, 224 CTSTATE_TO_REG, 225 ZONE_TO_REG, 226 ZONE_RESTORE_TO_REG, 227 MARK_TO_REG, 228 LABELS_TO_REG, 229 FTEID_TO_REG, 230 NIC_CHAIN_TO_REG, 231 NIC_ZONE_RESTORE_TO_REG, 232 }; 233 234 struct mlx5e_tc_attr_to_reg_mapping { 235 int mfield; /* rewrite field */ 236 int moffset; /* bit offset of mfield */ 237 int mlen; /* bits to rewrite/match */ 238 239 int soffset; /* byte offset of spec for match */ 240 }; 241 242 extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[]; 243 244 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, 245 struct net_device *out_dev); 246 247 int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, 248 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, 249 enum mlx5_flow_namespace_type ns, 250 enum mlx5e_tc_attr_to_reg type, 251 u32 data); 252 253 void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev, 254 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, 255 enum mlx5e_tc_attr_to_reg type, 256 int act_id, u32 data); 257 258 void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, 259 enum mlx5e_tc_attr_to_reg type, 260 u32 data, 261 u32 mask); 262 263 void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec, 264 enum mlx5e_tc_attr_to_reg type, 265 u32 *data, 266 u32 *mask); 267 268 int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev, 269 struct mlx5e_tc_mod_hdr_acts 
*mod_hdr_acts, 270 enum mlx5_flow_namespace_type ns, 271 enum mlx5e_tc_attr_to_reg type, 272 u32 data); 273 274 int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, 275 struct mlx5e_tc_flow *flow, 276 struct mlx5_flow_attr *attr); 277 278 void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev, 279 struct flow_match_basic *match, bool outer, 280 void *headers_c, void *headers_v); 281 282 int mlx5e_tc_nic_init(struct mlx5e_priv *priv); 283 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv); 284 285 int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 286 void *cb_priv); 287 288 struct mlx5_flow_handle * 289 mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, 290 struct mlx5_flow_spec *spec, 291 struct mlx5_flow_attr *attr); 292 void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv, 293 struct mlx5_flow_handle *rule, 294 struct mlx5_flow_attr *attr); 295 296 struct mlx5_flow_handle * 297 mlx5_tc_rule_insert(struct mlx5e_priv *priv, 298 struct mlx5_flow_spec *spec, 299 struct mlx5_flow_attr *attr); 300 void 301 mlx5_tc_rule_delete(struct mlx5e_priv *priv, 302 struct mlx5_flow_handle *rule, 303 struct mlx5_flow_attr *attr); 304 305 bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev); 306 int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, 307 u16 *vport); 308 309 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, 310 struct mlx5_flow_attr *attr, 311 int ifindex, 312 enum mlx5e_tc_int_port_type type, 313 u32 *action, 314 int out_index); 315 #else /* CONFIG_MLX5_CLS_ACT */ 316 static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } 317 static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} 318 static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; } 319 static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {} 320 static inline int 321 mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void 
*cb_priv) 322 { return -EOPNOTSUPP; } 323 324 #endif /* CONFIG_MLX5_CLS_ACT */ 325 326 struct mlx5_flow_attr *mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type); 327 328 struct mlx5_flow_handle * 329 mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, 330 struct mlx5_flow_spec *spec, 331 struct mlx5_flow_attr *attr); 332 void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv, 333 struct mlx5_flow_handle *rule, 334 struct mlx5_flow_attr *attr); 335 336 #else /* CONFIG_MLX5_ESWITCH */ 337 static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } 338 static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} 339 static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, 340 unsigned long flags) 341 { 342 return 0; 343 } 344 345 static inline int 346 mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) 347 { return -EOPNOTSUPP; } 348 #endif 349 350 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) 351 static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe) 352 { 353 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 354 u32 chain, reg_b; 355 356 reg_b = be32_to_cpu(cqe->ft_metadata); 357 358 if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ESW_ZONE_ID_BITS)) 359 return false; 360 361 chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK; 362 if (chain) 363 return true; 364 #endif 365 366 return false; 367 } 368 369 bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb); 370 #else /* CONFIG_MLX5_CLS_ACT */ 371 static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe) 372 { return false; } 373 static inline bool 374 mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb) 375 { return true; } 376 #endif 377 378 #endif /* __MLX5_EN_TC_H__ */ 379