/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

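/* Allocate a zeroed flower entry and initialize its stats lock;
 * returns NULL on allocation failure.
 */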
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

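/* Translate the flower match (flow dissector keys and masks) into a
 * Chelsio hardware filter specification. The NAT fields (nat_lip,
 * nat_fip, nat_lport, nat_fport) are seeded with the matched values,
 * so a subsequent pedit action only has to overwrite the fields it
 * actually rewrites.
 */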
static void cxgb4_process_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls,
				     struct ch_filter_specification *fs)
{
	u16 addr_type = 0;

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  cls->key);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);
		u16 ethtype_key = ntohs(key->n_proto);
		u16 ethtype_mask = ntohs(mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = key->ip_proto;
		fs->mask.proto = mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->mask);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
		memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->mask);

		fs->type = 1;
		memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
		memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_PORTS,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_PORTS,
						 cls->mask);
		fs->val.lport = be16_to_cpu(key->dst);
		fs->mask.lport = be16_to_cpu(mask->dst);
		fs->val.fport = be16_to_cpu(key->src);
		fs->mask.fport = be16_to_cpu(mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = fs->val.lport;
		fs->nat_fport = fs->val.fport;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_IP,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		fs->val.tos = key->tos;
		fs->mask.tos = mask->tos;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_ENC_KEYID,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_ENC_KEYID,
						 cls->mask);
		fs->val.vni = be32_to_cpu(key->keyid);
		fs->mask.vni = be32_to_cpu(mask->keyid);
		if (fs->mask.vni) {
			fs->val.encap_vld = 1;
			fs->mask.encap_vld = 1;
		}
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key, *mask;
		u16 vlan_tci, vlan_tci_mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_VLAN,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_VLAN,
						 cls->mask);
		vlan_tci = key->vlan_id | (key->vlan_priority <<
					   VLAN_PRIO_SHIFT);
		vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		/* Chelsio adapters use the ivlan_vld bit to match 802.1Q
		 * (VLAN-tagged) packets. When a VLAN tag is present, the
		 * ethtype field matches the ethertype of the inner header,
		 * i.e. the header following the VLAN header. So, if TC
		 * supplied the 802.1Q ethtype, set ivlan_vld and clear the
		 * ethtype match; otherwise, the hardware would try to match
		 * the 802.1Q ethtype against the inner header's ethertype.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ivlan_vld = 1;
			fs->mask.ivlan_vld = 1;
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

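/* Reject matches the hardware cannot offload: any dissector key
 * outside the supported set, an IP key on a non-IPv4/IPv6 ethertype,
 * or a TTL match.
 */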
static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls)
{
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (cls->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    cls->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);
		ethtype_key = ntohs(key->n_proto);
		ethtype_mask = ntohs(mask->n_proto);
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_dissector_key_ip *mask;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6\n");
			return -EINVAL;
		}

		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		if (mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

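/* Copy a pedit value into the filter specification. The pedits[]
 * table above maps each supported pedit field to the offset and size
 * of its destination within struct ch_filter_specification.
 */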
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

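/* Apply one pedit key to the filter specification, dispatching on
 * the header type and the 32-bit word offset within that header.
 * Any L3/L4 rewrite enables full NAT mode in the hardware.
 */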
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      TCP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), TCP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      UDP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), UDP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

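/* Walk the TC action list and fill in the corresponding filter
 * actions: pass/drop, switch to an egress port, VLAN pop/push/rewrite
 * and pedit header rewrites. The actions are assumed to have already
 * passed cxgb4_validate_flow_actions().
 */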
static void cxgb4_process_flow_actions(struct net_device *in,
				       struct tc_cls_flower_offload *cls,
				       struct ch_filter_specification *fs)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			fs->action = FILTER_PASS;
		} else if (is_tcf_gact_shot(a)) {
			fs->action = FILTER_DROP;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out = tcf_mirred_dev(a);
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
		} else if (is_tcf_vlan(a)) {
			u32 vlan_action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case TCA_VLAN_ACT_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case TCA_VLAN_ACT_MODIFY:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
		} else if (is_tcf_pedit(a)) {
			u32 mask, val, offset;
			int nkeys, i;
			u8 htype;

			nkeys = tcf_pedit_nkeys(a);
			for (i = 0; i < nkeys; i++) {
				htype = tcf_pedit_htype(a, i);
				mask = tcf_pedit_mask(a, i);
				val = tcf_pedit_val(a, i);
				offset = tcf_pedit_offset(a, i);

				process_pedit_field(fs, val, mask, offset,
						    htype);
			}
		}
	}
}

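/* A single 32-bit pedit word at the L4 offset covers both the source
 * and the destination port; the driver accepts a rewrite of only one
 * of the two per pedit key.
 */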
static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16-bits (SPORT) OR the lower
	 * 16-bits (DPORT) can be set, but NOT BOTH.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return !(hi && lo);
}

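/* Validate one pedit action: only SET commands on the Ethernet,
 * IPv4, IPv6, TCP and UDP fields that the hardware can rewrite are
 * accepted.
 */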
static bool valid_pedit_action(struct net_device *dev,
			       const struct tc_action *a)
{
	u32 mask, offset;
	u8 cmd, htype;
	int nkeys, i;

	nkeys = tcf_pedit_nkeys(a);
	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		mask = tcf_pedit_mask(a, i);
		offset = tcf_pedit_offset(a, i);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
			netdev_err(dev, "%s: Unsupported pedit cmd\n",
				   __func__);
			return false;
		}

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			switch (offset) {
			case PEDIT_ETH_DMAC_31_0:
			case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			case PEDIT_ETH_SMAC_47_16:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			switch (offset) {
			case PEDIT_IP4_SRC:
			case PEDIT_IP4_DST:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			switch (offset) {
			case PEDIT_IP6_SRC_31_0:
			case PEDIT_IP6_SRC_63_32:
			case PEDIT_IP6_SRC_95_64:
			case PEDIT_IP6_SRC_127_96:
			case PEDIT_IP6_DST_31_0:
			case PEDIT_IP6_DST_63_32:
			case PEDIT_IP6_DST_95_64:
			case PEDIT_IP6_DST_127_96:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			switch (offset) {
			case PEDIT_TCP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			switch (offset) {
			case PEDIT_UDP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit type\n",
				   __func__);
			return false;
		}
	}
	return true;
}

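/* Validate the complete action list: a redirect must target a port
 * on this adapter, VLAN push/modify must use the 802.1Q protocol,
 * and pedit/VLAN rewrites are only valid in combination with an
 * egress redirect.
 */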
static int cxgb4_validate_flow_actions(struct net_device *dev,
				       struct tc_cls_flower_offload *cls)
{
	const struct tc_action *a;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			/* Do nothing */
		} else if (is_tcf_gact_shot(a)) {
			/* Do nothing */
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			unsigned int i;
			bool found = false;

			target_dev = tcf_mirred_dev(a);
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 vlan_action = tcf_vlan_action(a);

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				break;
			case TCA_VLAN_ACT_PUSH:
			case TCA_VLAN_ACT_MODIFY:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
		} else if (is_tcf_pedit(a)) {
			bool pedit_valid = valid_pedit_action(dev, a);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
		} else {
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}

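/* Offload a new flower rule, e.g. (illustrative only):
 *
 *   tc filter add dev ethX ingress protocol ip flower \
 *	dst_ip 192.0.2.1 action drop
 *
 * Exact-match rules go to the hash filter region (fidx 0); all other
 * rules take a free TCAM index. Filter creation completes
 * asynchronously, so wait for the firmware reply before publishing
 * the entry in the cookie-keyed rhashtable.
 */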
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int fidx;
	int ret;

	if (cxgb4_validate_flow_actions(dev, cls))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_match(dev, cls, fs);
	cxgb4_process_flow_actions(dev, cls, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	if (fs->hash) {
		fidx = 0;
	} else {
		fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
		if (fidx < 0) {
			netdev_err(dev, "%s: No fidx for offload.\n", __func__);
			ret = -ENOMEM;
			goto free_entry;
		}
	}

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	}

	ret = ctx.result;
	/* Check if hw returned error for filter creation */
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

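/* Tear down an offloaded flower rule: delete the hardware filter,
 * unlink the entry from the rhashtable and free it after an RCU
 * grace period.
 */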
int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed\n");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}

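/* Periodic stats worker: walk all offloaded flows, refresh last_used
 * on every flow whose hardware packet count has advanced, then re-arm
 * the timer. The walk restarts if the rhashtable is resized while we
 * iterate (-EAGAIN).
 */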
static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);
	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

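/* Report the hardware hit counters for one flow back to TC, updating
 * the cached byte/packet counts under the entry's lock.
 */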
int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
				      packets - ofld_stats->packet_count,
				      ofld_stats->last_used);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

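/* Set up flower offload state: the cookie-keyed rhashtable and the
 * periodic stats timer and worker.
 */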
int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	if (adap->tc_flower_initialized)
		return -EEXIST;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	adap->tc_flower_initialized = true;
	return 0;
}

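/* Stop the stats timer and worker and release the flower table. */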
void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}