/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */

#ifndef __MLX5_EN_TC_PRIV_H__
#define __MLX5_EN_TC_PRIV_H__

#include "en_tc.h"

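/* Flow-private flag bits start right after the last flag bit exported through
 * en_tc.h, so both sets of flags can share the same flow->flags word.
 */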
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

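/* A flow keeps its primary rule in rule[0]; one extra slot is reserved for the
 * additional rule created when the action list has to be split across rules
 * (for example when mirroring is combined with packet-modifying actions).
 */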
#define MLX5E_TC_MAX_SPLITS 1

enum {
	MLX5E_TC_FLOW_FLAG_INGRESS               = MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS                = MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH               = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT                    = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC                   = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED             = MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN               = MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS           = MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW                  = MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP                   = MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY             = MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED               = MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_CT                    = MLX5E_TC_FLOW_BASE + 7,
	MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP        = MLX5E_TC_FLOW_BASE + 8,
	MLX5E_TC_FLOW_FLAG_TUN_RX                = MLX5E_TC_FLOW_BASE + 9,
	MLX5E_TC_FLOW_FLAG_FAILED                = MLX5E_TC_FLOW_BASE + 10,
	MLX5E_TC_FLOW_FLAG_SAMPLE                = MLX5E_TC_FLOW_BASE + 11,
};

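/* Data gathered while parsing a TC filter and its actions: the match spec and
 * pending modify-header actions, plus per forward destination the tunnel info
 * and mirred ifindex, and the netdev the filter was installed on.
 */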
struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
	struct ethhdr eth;
};

/* Helper struct for accessing a struct containing a list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 */
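/* For example, to go from a list_head linked into encaps[i].list back to the
 * owning struct mlx5e_tc_flow (illustrative sketch of the pattern above):
 *
 *	struct encap_flow_item *efi;
 *	struct mlx5e_tc_flow *flow;
 *
 *	efi = list_entry(item, struct encap_flow_item, list);
 *	flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 */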
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};

struct encap_route_flow_item {
	struct mlx5e_route_entry *r; /* attached route instance */
	int index;
};

struct mlx5e_tc_flow {
	struct rhash_head node;
	struct mlx5e_priv *priv;
	u64 cookie;
	unsigned long flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];

	/* flows sharing the same reformat object - currently mpls decap */
	struct list_head l3_to_l2_reformat;
	struct mlx5e_decap_entry *decap_reformat;

	/* flows sharing same route entry */
	struct list_head decap_routes;
	struct mlx5e_route_entry *decap_route;
	struct encap_route_flow_item encap_routes[MLX5_MAX_FLOW_FWD_VPORTS];

	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer;    /* flows with peer flow */
	struct list_head unready; /* flows not ready to be offloaded (e.g.
				   * due to a missing route)
				   */
	struct net_device *orig_dev; /* netdev that first added the flow */
	int tmp_entry_index;
	struct list_head tmp_list; /* temporary flow list used by neigh update */
	refcount_t refcnt;
	struct rcu_head rcu_head;
	struct completion init_done;
	int tunnel_id; /* the mapped tunnel id of this flow */
	struct mlx5_flow_attr *attr;
};

u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer);

struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr);

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);

static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)

static inline bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
					    unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)

static inline void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow,		\
						      MLX5E_TC_FLOW_FLAG_##flag)

static inline bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow,		\
						    MLX5E_TC_FLOW_FLAG_##flag)
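
/* The flag argument to the helpers above is the suffix of an
 * MLX5E_TC_FLOW_FLAG_* value, pasted in by the macros. Illustrative usage:
 *
 *	flow_flag_set(flow, OFFLOADED);
 *	if (flow_flag_test(flow, HAIRPIN))
 *		...
 *	flow_flag_clear(flow, SLOW);
 */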

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow);
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec);
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr);

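/* Flow lifetime is reference counted: mlx5e_flow_get() takes a reference on
 * flow->refcnt (failing if the flow is already going away) and
 * mlx5e_flow_put() drops it, freeing the flow once the last reference is
 * released.
 */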
struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow);
void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow);

struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);

#endif /* __MLX5_EN_TC_PRIV_H__ */