/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */

#ifndef __MLX5_EN_TC_PRIV_H__
#define __MLX5_EN_TC_PRIV_H__

#include "en_tc.h"
#include "en/tc/act/act.h"

/* First bit index available for driver-private flow flags, placed just
 * after the last flag bit exported via en_tc.h (MLX5E_TC_FLAG_*).
 */
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

/* Maximum number of rule splits per flow; sizes mlx5e_tc_flow::rule[]
 * (MLX5E_TC_MAX_SPLITS + 1 handles).
 */
#define MLX5E_TC_MAX_SPLITS 1

/* Shorthand accessor for the NIC-mode TC chains object hanging off priv. */
#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)

/* Per-flow state bits kept in mlx5e_tc_flow::flags.
 * The first five entries alias the exported MLX5E_TC_FLAG_* bits so the
 * same word can be tested against both namespaces; the remainder are
 * driver-private and start at MLX5E_TC_FLOW_BASE.
 * Accessed through the flow_flag_*() helpers below.
 */
enum {
	MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
	MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
	MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9,
	MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10,
	MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 11,
};

/* Intermediate result of parsing a TC flower rule, before the flow is
 * committed to hardware. Per-destination arrays are bounded by
 * MLX5_MAX_FLOW_FWD_VPORTS, matching mirred_ifindex[] one-to-one with
 * tun_info[].
 */
struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
	struct ethhdr eth;
	struct mlx5e_tc_act_parse_state parse_state;
};

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 * |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index; /* position within mlx5e_tc_flow::encaps[] */
};

struct encap_route_flow_item {
	struct mlx5e_route_entry *r; /* attached route instance */
	int index; /* position within mlx5e_tc_flow::encap_routes[] */
};

/* One offloaded TC flow. Hashed by 'node' (keyed on 'cookie') and
 * reference counted via 'refcnt' / mlx5e_flow_get() / mlx5e_flow_put();
 * freed through 'rcu_head'. Flag bits in 'flags' come from the enum
 * above and are manipulated with the flow_flag_*() helpers.
 */
struct mlx5e_tc_flow {
	struct rhash_head node;
	struct mlx5e_priv *priv;
	u64 cookie;
	unsigned long flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];

	/* flows sharing the same reformat object - currently mpls decap */
	struct list_head l3_to_l2_reformat;
	struct mlx5e_decap_entry *decap_reformat;

	/* flows sharing same route entry */
	struct list_head decap_routes;
	struct mlx5e_route_entry *decap_route;
	struct encap_route_flow_item encap_routes[MLX5_MAX_FLOW_FWD_VPORTS];

	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer;    /* flows with peer flow */
	struct list_head unready; /* flows not ready to be offloaded (e.g
				   * due to missing route)
				   */
	struct net_device *orig_dev; /* netdev adding flow first */
	int tmp_entry_index;
	struct list_head tmp_list; /* temporary flow list used by neigh update */
	refcount_t refcnt;
	struct rcu_head rcu_head;
	struct completion init_done;
	struct completion del_hw_done;
	struct mlx5_flow_attr *attr;
};

struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr);

void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr);

u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer);

struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr);

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow);
bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);

/* Set an arbitrary bit in flow->flags with release-like ordering: prior
 * stores to the flow are made visible before the bit appears set.
 * Prefer the flow_flag_set() wrapper, which prepends the
 * MLX5E_TC_FLOW_FLAG_ prefix for you.
 */
static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)

/* Atomically set a flag bit, returning its previous value. */
static inline bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
					    unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)

/* Clear a flag bit with the same ordering guarantee as __flow_flag_set(). */
static inline void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
						      MLX5E_TC_FLOW_FLAG_##flag)

/* Test a flag bit with acquire-like ordering: the barrier pairs with
 * __flow_flag_set()/__flow_flag_clear() so that reads of the flow which
 * follow the test observe stores made before the flag was changed.
 */
static inline bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
						    MLX5E_TC_FLOW_FLAG_##flag)

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow);
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec);

void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow);
void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow);

struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);

struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);

void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);

#endif /* __MLX5_EN_TC_PRIV_H__ */