/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

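/* Flow dissector keys the flower app can currently offload. A rule that
 * matches on any key outside this set is rejected with -EOPNOTSUPP.
 */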
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

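/* Build a flow add/delete control message from @nfp_flow and send it to the
 * firmware over the control vNIC. The metadata lengths are temporarily
 * converted to long words for the wire format and restored before returning.
 */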
static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}

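/* Return true if the rule matches on any field above the MAC layer
 * (L3 addresses, L4 ports or ICMP); such matches cannot be offloaded
 * when the ethertype itself is not handled by the firmware.
 */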
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

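/* Walk the dissector keys of @flow and work out which NFP key layers are
 * needed and how large the hardware key will be. Results are written to
 * @ret_key_ls; -EOPNOTSUPP is returned for matches the firmware cannot
 * handle.
 */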
static int
nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct flow_dissector_key_ip *mask_ip = NULL;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		/* We are expecting a tunnel. Tunnel offload is not yet
		 * supported, so reject the flow.
		 */
		if (mask_enc_ctl->addr_type)
			return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
		mask_ip = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_IP,
						    flow->mask);

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
	key_size = sizeof(struct nfp_flower_meta_one) +
		   sizeof(struct nfp_flower_in_port) +
		   sizeof(struct nfp_flower_mac_mpls);

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			if (mask_ip && mask_ip->tos)
				return -EOPNOTSUPP;
			if (mask_ip && mask_ip->ttl)
				return -EOPNOTSUPP;
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			if (mask_ip && mask_ip->tos)
				return -EOPNOTSUPP;
			if (mask_ip && mask_ip->ttl)
				return -EOPNOTSUPP;
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		/* Currently we do not offload MPLS. */
		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			return -EOPNOTSUPP;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other IP proto - cannot be offloaded, reject the
			 * flow.
			 */
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

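/* Allocate a flow payload sized according to @key_layer. Returns NULL on
 * allocation failure; any partially allocated buffers are freed.
 */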
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->meta.flags = 0;
	spin_lock_init(&flow_pay->lock);

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the driver's flow hash table and offloads its match
 * and action payload to the firmware.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	int err;

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(key_layer, flow);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	INIT_HLIST_NODE(&flow_pay->link);
	flow_pay->tc_flower_cookie = flow->cookie;
	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);

	/* The key layer scratch data is no longer needed. The flow payload
	 * itself is freed when the flower rule is destroyed.
	 */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the driver's flow hash table and frees its match
 * and action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_fl_payload *nfp_flow;
	int err;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
	if (!nfp_flow)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

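	/* Success falls through to the same cleanup: the flow entry is
	 * removed from the table and freed whether or not the firmware
	 * delete succeeded, and err carries the result back to TC.
	 */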
err_free_flow:
	hash_del_rcu(&nfp_flow->link);
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
{
	struct nfp_fl_payload *nfp_flow;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
	if (!nfp_flow)
		return -EINVAL;

	spin_lock_bh(&nfp_flow->lock);
	tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
			      nfp_flow->stats.pkts, nfp_flow->stats.used);

	nfp_flow->stats.pkts = 0;
	nfp_flow->stats.bytes = 0;
	spin_unlock_bh(&nfp_flow->lock);

	return 0;
}

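/* Dispatch a TC flower command to the matching add, delete or stats
 * handler. Unknown commands are rejected with -EOPNOTSUPP.
 */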
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower)
{
	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, flower);
	}

	return -EOPNOTSUPP;
}

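/* Entry point for TC offload requests on a repr netdev. Only clsact
 * ingress flower rules on chain 0 with an 802.3 protocol are offloaded;
 * everything else is rejected with -EOPNOTSUPP.
 */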
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	struct tc_cls_flower_offload *cls_flower = type_data;

	if (type != TC_SETUP_CLSFLOWER ||
	    !is_classid_clsact_ingress(cls_flower->common.classid) ||
	    !eth_proto_is_802_3(cls_flower->common.protocol) ||
	    cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	return nfp_flower_repr_offload(app, netdev, cls_flower);
}