/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

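/* Allocate a zeroed flower entry and initialize its stats lock. The
 * caller must check for a NULL return before use.
 */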
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

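/* Translate the flow match keys supplied by TC (ethtype, L3/L4 addresses
 * and ports, tunnel VNI, VLAN, ToS) into a Chelsio hardware filter
 * specification. The NAT fields are seeded with the matched values so
 * that a later pedit action only overwrites the parts it rewrites.
 */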
static void cxgb4_process_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls,
				     struct ch_filter_specification *fs)
{
	u16 addr_type = 0;

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  cls->key);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);
		u16 ethtype_key = ntohs(key->n_proto);
		u16 ethtype_mask = ntohs(mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = key->ip_proto;
		fs->mask.proto = mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->mask);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
		memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->mask);

		fs->type = 1;
		memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
		memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_PORTS,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_PORTS,
						 cls->mask);
		fs->val.lport = be16_to_cpu(key->dst);
		fs->mask.lport = be16_to_cpu(mask->dst);
		fs->val.fport = be16_to_cpu(key->src);
		fs->mask.fport = be16_to_cpu(mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = be16_to_cpu(key->dst);
		fs->nat_fport = be16_to_cpu(key->src);
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_IP,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		fs->val.tos = key->tos;
		fs->mask.tos = mask->tos;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_ENC_KEYID,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_ENC_KEYID,
						 cls->mask);
		fs->val.vni = be32_to_cpu(key->keyid);
		fs->mask.vni = be32_to_cpu(mask->keyid);
		if (fs->mask.vni) {
			fs->val.encap_vld = 1;
			fs->mask.encap_vld = 1;
		}
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key, *mask;
		u16 vlan_tci, vlan_tci_mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_VLAN,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_VLAN,
						 cls->mask);
		vlan_tci = key->vlan_id | (key->vlan_priority <<
					   VLAN_PRIO_SHIFT);
		vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;
		/* Chelsio adapters use the ivlan_vld bit to match VLAN
		 * (802.1Q) packets. When a VLAN tag is present, the
		 * ethtype field matches the ethtype of the inner header,
		 * i.e. the header following the VLAN header. So if TC
		 * supplied an ethtype of 802.1Q, set ivlan_vld and clear
		 * the ethtype match; otherwise the hardware would try to
		 * match 802.1Q against the inner header's ethtype.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ivlan_vld = 1;
			fs->mask.ivlan_vld = 1;
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

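/* Reject matches the hardware cannot offload: any dissector key outside
 * the supported set, an IP key on a non-IPv4/IPv6 ethtype, or a TTL match.
 */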
static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls)
{
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (cls->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    cls->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);
		ethtype_key = ntohs(key->n_proto);
		ethtype_mask = ntohs(mask->n_proto);
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_dissector_key_ip *mask;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6\n");
			return -EINVAL;
		}

		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		if (mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

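/* Write a pedit value into the filter specification. The pedits table
 * maps each supported pedit field to its offset and size within struct
 * ch_filter_specification.
 */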
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

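/* Dispatch a single pedit key to the corresponding filter field. MAC
 * rewrites set newdmac/newsmac; IP address and L4 port rewrites enable
 * NAT_MODE_ALL so the hardware rewrites the full 4-tuple.
 */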
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      TCP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), TCP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      UDP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), UDP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

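/* Translate TC actions (pass, drop, redirect, VLAN pop/push/modify and
 * pedit rewrites) into the filter specification. The action list is
 * assumed to have passed cxgb4_validate_flow_actions() already.
 */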
static void cxgb4_process_flow_actions(struct net_device *in,
				       struct tc_cls_flower_offload *cls,
				       struct ch_filter_specification *fs)
{
	const struct tc_action *a;
	int i;

	tcf_exts_for_each_action(i, a, cls->exts) {
		if (is_tcf_gact_ok(a)) {
			fs->action = FILTER_PASS;
		} else if (is_tcf_gact_shot(a)) {
			fs->action = FILTER_DROP;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out = tcf_mirred_dev(a);
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
		} else if (is_tcf_vlan(a)) {
			u32 vlan_action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case TCA_VLAN_ACT_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case TCA_VLAN_ACT_MODIFY:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
		} else if (is_tcf_pedit(a)) {
			u32 mask, val, offset;
			int nkeys, i;
			u8 htype;

			nkeys = tcf_pedit_nkeys(a);
			for (i = 0; i < nkeys; i++) {
				htype = tcf_pedit_htype(a, i);
				mask = tcf_pedit_mask(a, i);
				val = tcf_pedit_val(a, i);
				offset = tcf_pedit_offset(a, i);

				process_pedit_field(fs, val, mask, offset,
						    htype);
			}
		}
	}
}

static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16-bits (SPORT) OR the lower
	 * 16-bits (DPORT) can be set, but NOT BOTH.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return !(hi && lo);
}

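/* Check that a pedit action only touches fields the hardware can
 * rewrite: MAC addresses, IPv4/IPv6 addresses and TCP/UDP ports, all
 * via the SET command.
 */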
static bool valid_pedit_action(struct net_device *dev,
			       const struct tc_action *a)
{
	u32 mask, offset;
	u8 cmd, htype;
	int nkeys, i;

	nkeys = tcf_pedit_nkeys(a);
	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		mask = tcf_pedit_mask(a, i);
		offset = tcf_pedit_offset(a, i);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
			netdev_err(dev, "%s: Unsupported pedit cmd\n",
				   __func__);
			return false;
		}

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			switch (offset) {
			case PEDIT_ETH_DMAC_31_0:
			case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			case PEDIT_ETH_SMAC_47_16:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			switch (offset) {
			case PEDIT_IP4_SRC:
			case PEDIT_IP4_DST:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			switch (offset) {
			case PEDIT_IP6_SRC_31_0:
			case PEDIT_IP6_SRC_63_32:
			case PEDIT_IP6_SRC_95_64:
			case PEDIT_IP6_SRC_127_96:
			case PEDIT_IP6_DST_31_0:
			case PEDIT_IP6_DST_63_32:
			case PEDIT_IP6_DST_95_64:
			case PEDIT_IP6_DST_127_96:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			switch (offset) {
			case PEDIT_TCP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			switch (offset) {
			case PEDIT_UDP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit type\n",
				   __func__);
			return false;
		}
	}
	return true;
}

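/* Validate the action list: redirect targets must be ports of this
 * adapter, only 802.1Q may be pushed or rewritten, and pedit/VLAN
 * rewrites are only valid when combined with an egress redirect.
 */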
static int cxgb4_validate_flow_actions(struct net_device *dev,
				       struct tc_cls_flower_offload *cls)
{
	const struct tc_action *a;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	int i;

	tcf_exts_for_each_action(i, a, cls->exts) {
		if (is_tcf_gact_ok(a)) {
			/* Do nothing */
		} else if (is_tcf_gact_shot(a)) {
			/* Do nothing */
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			unsigned int i;
			bool found = false;

			target_dev = tcf_mirred_dev(a);
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 vlan_action = tcf_vlan_action(a);

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				break;
			case TCA_VLAN_ACT_PUSH:
			case TCA_VLAN_ACT_MODIFY:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
		} else if (is_tcf_pedit(a)) {
			bool pedit_valid = valid_pedit_action(dev, a);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
		} else {
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}

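/* Offload a new flower rule: validate it, build the filter spec, pick a
 * filter index (hash filters are placed by hardware, so no index is
 * reserved), create the filter and wait for the firmware reply, then
 * track the rule by its TC cookie in the flower rhashtable.
 */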
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int fidx;
	int ret;

	if (cxgb4_validate_flow_actions(dev, cls))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_match(dev, cls, fs);
	cxgb4_process_flow_actions(dev, cls, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	if (fs->hash) {
		fidx = 0;
	} else {
		fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
		if (fidx < 0) {
			netdev_err(dev, "%s: No fidx for offload.\n", __func__);
			ret = -ENOMEM;
			goto free_entry;
		}
	}

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	}

	ret = ctx.result;
	/* Check if hw returned error for filter creation */
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

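/* Tear down an offloaded rule: look it up by TC cookie, delete the
 * hardware filter and remove the entry from the rhashtable.
 */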
int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed\n");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}

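/* Periodic work that walks the flower table and polls hardware hit
 * counters, updating last_used whenever a filter's packet count has
 * advanced. The walk is restarted on -EAGAIN, and the stats timer is
 * re-armed when it completes.
 */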
static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);
	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

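/* Report packet/byte deltas for an offloaded rule back to TC and
 * refresh the cached counters.
 */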
int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
				      packets - ofld_stats->packet_count,
				      ofld_stats->last_used);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	if (adap->tc_flower_initialized)
		return -EEXIST;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	adap->tc_flower_initialized = true;
	return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}