/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "cxgb4.h"
#include "cxgb4_tc_u32_parse.h"
#include "cxgb4_tc_u32.h"

/* Fill ch_filter_specification with parsed match value/mask pairs. */
static int fill_match_fields(struct adapter *adap,
			     struct ch_filter_specification *fs,
			     struct tc_cls_u32_offload *cls,
			     const struct cxgb4_match_field *entry,
			     bool next_header)
{
	unsigned int i, j;
	u32 val, mask;
	int off, err;
	bool found;

	for (i = 0; i < cls->knode.sel->nkeys; i++) {
		off = cls->knode.sel->keys[i].off;
		val = cls->knode.sel->keys[i].val;
		mask = cls->knode.sel->keys[i].mask;

		if (next_header) {
			/* For next headers, parse only keys with offmask */
			if (!cls->knode.sel->keys[i].offmask)
				continue;
		} else {
			/* For the remaining, parse only keys without offmask */
			if (cls->knode.sel->keys[i].offmask)
				continue;
		}

		found = false;

		for (j = 0; entry[j].val; j++) {
			if (off == entry[j].off) {
				found = true;
				err = entry[j].val(fs, val, mask);
				if (err)
					return err;
				break;
			}
		}

		if (!found)
			return -EINVAL;
	}

	return 0;
}
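
/* Illustrative example (not part of the driver): a simple IPv4 rule
 * such as
 *
 *   tc filter add dev ethX parent ffff: protocol ip u32 \
 *       match ip dst 192.168.1.1/32 action drop
 *
 * arrives as one u32 key with off == 16 (the destination address
 * within the IPv4 header), no offmask, and val/mask in network byte
 * order.  fill_match_fields() scans the supplied field table (here
 * cxgb4_ipv4_fields) for the entry registered at offset 16 and lets
 * that entry's handler record the address in fs->val/fs->mask.
 */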

/* Fill ch_filter_specification with parsed action. */
static int fill_action_fields(struct adapter *adap,
			      struct ch_filter_specification *fs,
			      struct tc_cls_u32_offload *cls)
{
	unsigned int num_actions = 0;
	const struct tc_action *a;
	struct tcf_exts *exts;
	int i;

	exts = cls->knode.exts;
	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	tcf_exts_for_each_action(i, a, exts) {
		/* Don't allow more than one action per rule. */
		if (num_actions)
			return -EINVAL;

		/* Drop in hardware. */
		if (is_tcf_gact_shot(a)) {
			fs->action = FILTER_DROP;
			num_actions++;
			continue;
		}

		/* Redirect to the specified port in hardware. */
		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *n_dev, *target_dev;
			bool found = false;
			unsigned int i;

			target_dev = tcf_mirred_dev(a);
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					fs->action = FILTER_SWITCH;
					fs->eport = i;
					found = true;
					break;
				}
			}

			/* Interface doesn't belong to any port of
			 * the underlying hardware.
			 */
			if (!found)
				return -EINVAL;

			num_actions++;
			continue;
		}

		/* Unsupported action. */
		return -EINVAL;
	}

	return 0;
}
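
/* Illustrative example (not part of the driver): the two actions this
 * parser accepts map to the filter spec as follows, assuming eth0 and
 * eth1 are ports of the same adapter:
 *
 *   ... u32 match ... action drop
 *       -> fs->action = FILTER_DROP
 *   ... u32 match ... action mirred egress redirect dev eth1
 *       -> fs->action = FILTER_SWITCH, fs->eport = port index of eth1
 *
 * A redirect to a device that is not a port of this adapter, or a rule
 * carrying more than one action, is rejected with -EINVAL.
 */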

int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
	const struct cxgb4_match_field *start, *link_start = NULL;
	struct adapter *adapter = netdev2adap(dev);
	__be16 protocol = cls->common.protocol;
	struct ch_filter_specification fs;
	struct cxgb4_tc_u32_table *t;
	struct cxgb4_link *link;
	unsigned int filter_id;
	u32 uhtid, link_uhtid;
	bool is_ipv6 = false;
	int ret;

	if (!can_tc_u32_offload(dev))
		return -EOPNOTSUPP;

	if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
		return -EOPNOTSUPP;

	/* Fetch the location to insert the filter. */
	filter_id = cls->knode.handle & 0xFFFFF;

	if (filter_id >= adapter->tids.nftids) {
		dev_err(adapter->pdev_dev,
			"Location %d out of range for insertion. Max: %d\n",
			filter_id, adapter->tids.nftids - 1);
		return -ERANGE;
	}

	t = adapter->tc_u32;
	uhtid = TC_U32_USERHTID(cls->knode.handle);
	link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);

	/* Ensure that uhtid is either root u32 (i.e. 0x800)
	 * or a valid linked bucket.
	 */
	if (uhtid != 0x800 && uhtid >= t->size)
		return -EINVAL;

	/* Ensure link handle uhtid is sane, if specified. */
	if (link_uhtid >= t->size)
		return -EINVAL;

	memset(&fs, 0, sizeof(fs));

	if (protocol == htons(ETH_P_IPV6)) {
		start = cxgb4_ipv6_fields;
		is_ipv6 = true;
	} else {
		start = cxgb4_ipv4_fields;
		is_ipv6 = false;
	}

	if (uhtid != 0x800) {
		/* Link must exist from root node before insertion. */
		if (!t->table[uhtid - 1].link_handle)
			return -EINVAL;

		/* Link must have a valid supported next header. */
		link_start = t->table[uhtid - 1].match_field;
		if (!link_start)
			return -EINVAL;
	}

	/* Parse links and record them for subsequent jumps to valid
	 * next headers.
	 */
	if (link_uhtid) {
		const struct cxgb4_next_header *next;
		bool found = false;
		unsigned int i, j;
		u32 val, mask;
		int off;

		if (t->table[link_uhtid - 1].link_handle) {
			dev_err(adapter->pdev_dev,
				"Link handle exists for: 0x%x\n",
				link_uhtid);
			return -EINVAL;
		}

		next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;

		/* Try to find matches that allow jumps to next header. */
		for (i = 0; next[i].jump; i++) {
			if (next[i].offoff != cls->knode.sel->offoff ||
			    next[i].shift != cls->knode.sel->offshift ||
			    next[i].mask != cls->knode.sel->offmask ||
			    next[i].offset != cls->knode.sel->off)
				continue;

			/* Found a possible candidate.  Find a key that
			 * matches the corresponding offset, value, and
			 * mask to jump to next header.
			 */
			for (j = 0; j < cls->knode.sel->nkeys; j++) {
				off = cls->knode.sel->keys[j].off;
				val = cls->knode.sel->keys[j].val;
				mask = cls->knode.sel->keys[j].mask;

				if (next[i].match_off == off &&
				    next[i].match_val == val &&
				    next[i].match_mask == mask) {
					found = true;
					break;
				}
			}

			if (!found)
				continue; /* Try next candidate. */

			/* Candidate to jump to next header found.
			 * Translate all keys to internal specification
			 * and store them in jump table. This spec is copied
			 * later to set the actual filters.
			 */
			ret = fill_match_fields(adapter, &fs, cls,
						start, false);
			if (ret)
				goto out;

			link = &t->table[link_uhtid - 1];
			link->match_field = next[i].jump;
			link->link_handle = cls->knode.handle;
			memcpy(&link->fs, &fs, sizeof(fs));
			break;
		}

		/* No candidate found to jump to next header. */
		if (!found)
			return -EINVAL;

		return 0;
	}

	/* Fill ch_filter_specification match fields to be shipped to
	 * hardware.  Copy the linked spec (if any) first, then update
	 * it as needed.
	 */
	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
		/* Copy linked ch_filter_specification */
		memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
		ret = fill_match_fields(adapter, &fs, cls,
					link_start, true);
		if (ret)
			goto out;
	}

	ret = fill_match_fields(adapter, &fs, cls, start, false);
	if (ret)
		goto out;

	/* Fill ch_filter_specification action fields to be shipped to
	 * hardware.
	 */
	ret = fill_action_fields(adapter, &fs, cls);
	if (ret)
		goto out;

	/* The filter spec has been completely built from the info
	 * provided from u32.  We now set some default fields in the
	 * spec for sanity.
	 */

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs.val.iport = netdev2pinfo(dev)->port_id;
	fs.mask.iport = ~0;

	/* Enable filter hit counts. */
	fs.hitcnts = 1;

	/* Set type of filter - IPv6 or IPv4 */
	fs.type = is_ipv6 ? 1 : 0;

	/* Set the filter */
	ret = cxgb4_set_filter(dev, filter_id, &fs);
	if (ret)
		goto out;

	/* If this is a linked bucket, then set the corresponding
	 * entry in the bitmap to mark it as belonging to this linked
	 * bucket.
	 */
	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
		set_bit(filter_id, t->table[uhtid - 1].tid_map);

out:
	return ret;
}
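
/* Illustrative example (not part of the driver): matching a field in a
 * next header needs the classic two-step u32 setup.  E.g. to match the
 * TCP destination port behind a variable-length IPv4 header (handle
 * and prio values are illustrative):
 *
 *   tc filter add dev ethX parent ffff: protocol ip prio 1 \
 *       handle 1: u32 divisor 1
 *   tc filter add dev ethX parent ffff: protocol ip prio 1 u32 \
 *       match ip protocol 6 ff link 1: \
 *       offset at 0 mask 0f00 shift 6 plus 0 eat
 *   tc filter add dev ethX parent ffff: protocol ip prio 1 u32 \
 *       ht 1: match tcp dst 80 ffff action drop
 *
 * The second command takes the link_uhtid path above: the selector's
 * offset description (off/offmask/offshift/offoff) is compared against
 * cxgb4_ipv4_jumps and, on a match, the partial spec is stored in
 * t->table[link_uhtid - 1].  The third command (uhtid != 0x800) copies
 * that stored spec and fills in the TCP keys before programming the
 * filter via cxgb4_set_filter().
 */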

int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int filter_id, max_tids, i, j;
	struct cxgb4_link *link = NULL;
	struct cxgb4_tc_u32_table *t;
	u32 handle, uhtid;
	int ret;

	if (!can_tc_u32_offload(dev))
		return -EOPNOTSUPP;

	/* Fetch the location to delete the filter. */
	filter_id = cls->knode.handle & 0xFFFFF;

	if (filter_id >= adapter->tids.nftids) {
		dev_err(adapter->pdev_dev,
			"Location %d out of range for deletion. Max: %d\n",
			filter_id, adapter->tids.nftids - 1);
		return -ERANGE;
	}

	t = adapter->tc_u32;
	handle = cls->knode.handle;
	uhtid = TC_U32_USERHTID(cls->knode.handle);

	/* Ensure that uhtid is either root u32 (i.e. 0x800)
	 * or a valid linked bucket.
	 */
	if (uhtid != 0x800 && uhtid >= t->size)
		return -EINVAL;

	/* Delete the specified filter */
	if (uhtid != 0x800) {
		link = &t->table[uhtid - 1];
		if (!link->link_handle)
			return -EINVAL;

		if (!test_bit(filter_id, link->tid_map))
			return -EINVAL;
	}

	ret = cxgb4_del_filter(dev, filter_id, NULL);
	if (ret)
		goto out;

	if (link)
		clear_bit(filter_id, link->tid_map);

	/* If a link is being deleted, then delete all filters
	 * associated with the link.
	 */
	max_tids = adapter->tids.nftids;
	for (i = 0; i < t->size; i++) {
		link = &t->table[i];

		if (link->link_handle == handle) {
			for (j = 0; j < max_tids; j++) {
				if (!test_bit(j, link->tid_map))
					continue;

				ret = __cxgb4_del_filter(dev, j, NULL, NULL);
				if (ret)
					goto out;

				clear_bit(j, link->tid_map);
			}

			/* Clear the link state */
			link->match_field = NULL;
			link->link_handle = 0;
			memset(&link->fs, 0, sizeof(link->fs));
			break;
		}
	}

out:
	return ret;
}
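
/* Illustrative note (not part of the driver): deleting the knode whose
 * handle was recorded as a link_handle (e.g. "tc filter del ... u32"
 * on the rule that carried "link 1:" above) takes the loop above: each
 * filter id set in that link's tid_map is removed from hardware via
 * __cxgb4_del_filter() and the link state is cleared, so no orphaned
 * hardware filters remain once the link is gone.
 */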

void cxgb4_cleanup_tc_u32(struct adapter *adap)
{
	struct cxgb4_tc_u32_table *t;
	unsigned int i;

	if (!adap->tc_u32)
		return;

	/* Free up all allocated memory. */
	t = adap->tc_u32;
	for (i = 0; i < t->size; i++) {
		struct cxgb4_link *link = &t->table[i];

		kvfree(link->tid_map);
	}
	kvfree(adap->tc_u32);
}

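/* Sizing note (illustrative): each link tracks the filters inserted
 * through it in a bitmap of nftids bits.  BITS_TO_LONGS() rounds the
 * allocation up to whole longs; e.g. with max_tids = 100 on a 64-bit
 * kernel, BITS_TO_LONGS(100) == 2, so each tid_map occupies two
 * unsigned longs (128 bits), of which the first 100 are used.
 */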
struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
{
	unsigned int max_tids = adap->tids.nftids;
	struct cxgb4_tc_u32_table *t;
	unsigned int i;

	if (!max_tids)
		return NULL;

	t = kvzalloc(sizeof(*t) +
		     (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL);
	if (!t)
		return NULL;

	t->size = max_tids;

	for (i = 0; i < t->size; i++) {
		struct cxgb4_link *link = &t->table[i];
		unsigned int bmap_size;

		bmap_size = BITS_TO_LONGS(max_tids);
		/* kvcalloc() returns zeroed memory, so the bitmap does
		 * not need an explicit bitmap_zero().
		 */
		link->tid_map = kvcalloc(bmap_size, sizeof(unsigned long),
					 GFP_KERNEL);
		if (!link->tid_map)
			goto out_no_mem;
	}

	return t;

out_no_mem:
	for (i = 0; i < t->size; i++) {
		struct cxgb4_link *link = &t->table[i];

		kvfree(link->tid_map);
	}

	kvfree(t);

	return NULL;
}