/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

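/* Translate the flower filter's TC actions (gact drop, mirred redirect,
 * VLAN mangle) into mlxsw ACL rule actions. A count action is always
 * emitted first so statistics can later be read back for the rule.
 */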
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *dev,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
	if (err)
		return err;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;

			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     MLXSW_SP_DUMMY_FID);
			if (err)
				return err;

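			/* Redirecting a packet back to its own port is
			 * expressed as a NULL out_dev, which the fwd
			 * helper is expected to treat as "forward to the
			 * ingress port".
			 */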
			out_dev = __dev_get_by_index(dev_net(dev), ifindex);
			if (out_dev == dev)
				out_dev = NULL;

			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

			/* Do not return directly here; an early return
			 * would silently skip any actions that follow.
			 */
			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  action, vid,
							  proto, prio);
			if (err)
				return err;
		} else {
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

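/* Copy the IPv4 source and destination address key/mask pairs from the
 * flower dissector into the corresponding flex-key elements.
 */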
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv4_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv4_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
				       ntohl(key->src), ntohl(mask->src));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
				       ntohl(key->dst), ntohl(mask->dst));
}

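/* An IPv6 address does not fit into a single flex-key element, so each
 * address is split into _HI and _LO halves of sizeof(addr) / 2 bytes.
 */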
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv6_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv6_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->mask);
	size_t addr_half_size = sizeof(key->src) / 2;

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
				       &key->src.s6_addr[0],
				       &mask->src.s6_addr[0],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
				       &key->src.s6_addr[addr_half_size],
				       &mask->src.s6_addr[addr_half_size],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
				       &key->dst.s6_addr[0],
				       &mask->dst.s6_addr[0],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
				       &key->dst.s6_addr[addr_half_size],
				       &mask->dst.s6_addr[addr_half_size],
				       addr_half_size);
}

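/* Parse L4 port keys. Port matching is only well-defined for TCP and UDP,
 * so filters carrying a ports key with any other IP protocol are rejected.
 */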
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}

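/* Translate a flower match into mlxsw ACL rule info: validate the set of
 * dissector keys the filter uses, set the rule priority, emit key/mask
 * pairs for each supported key and finally parse the actions.
 */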
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *dev,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

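	/* Reject filters that match on any key not handled below; silently
	 * ignoring a key would change the filter's semantics.
	 */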
	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		u16 n_proto_key = ntohs(key->n_proto);
		u16 n_proto_mask = ntohs(mask->n_proto);

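		/* ETH_P_ALL means "match any protocol"; encode it as a
		 * zero key/mask pair so the ethertype is wildcarded.
		 */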
		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC,
					       key->dst, mask->dst,
					       sizeof(key->dst));
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC,
					       key->src, mask->src,
					       sizeof(key->src));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts);
}

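/* Offload a flower filter: get (or create) the FLOWER ruleset bound to the
 * port, create a rule keyed by the filter's cookie, parse the match and
 * actions into it and commit it to the device.
 */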
int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			    __be16 protocol, struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

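/* Remove an offloaded flower filter: look the rule up by its cookie,
 * unbind it from the device and destroy it. The ruleset reference taken
 * for the lookup is dropped at the end.
 */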
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
					   ingress,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

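/* Read the rule's hardware counters and propagate them into the filter's
 * TC actions, so that "tc -s filter show" reflects offloaded traffic.
 */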
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	struct tc_action *a;
	LIST_HEAD(actions);
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
					   ingress,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule) {
		/* Drop the ruleset reference taken above; returning
		 * directly here would leak it.
		 */
		err = -EINVAL;
		goto err_rule_lookup;
	}

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

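	/* tcf_action_stats_update() updates per-CPU action counters, so
	 * keep preemption disabled while walking the action list.
	 */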
	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
err_rule_lookup:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}