// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2014-2019 aQuantia Corporation. */

/* File aq_filters.c: RX filters related functions. */

#include "aq_filters.h"

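/* Check whether the flow spec describes a flow type the driver can
 * program. For IPv4/IPv6 user flows only TCP, UDP, SCTP and plain IP
 * payloads are accepted; FLOW_MAC_EXT rules are rejected outright.
 */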
static bool __must_check
aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
{
	if (fsp->flow_type & FLOW_MAC_EXT)
		return false;

	switch (fsp->flow_type & ~FLOW_EXT) {
	case ETHER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		return true;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_IP:
			return true;
		default:
			return false;
		}
	case IPV6_USER_FLOW:
		switch (fsp->h_u.usr_ip6_spec.l4_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_IP:
			return true;
		default:
			return false;
		}
	default:
		return false;
	}

	return false;
}

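/* Compare two flow specs field by field; true only when the flow type,
 * header values and all masks are identical.
 */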
static bool __must_check
aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
		struct ethtool_rx_flow_spec *fsp2)
{
	if (fsp1->flow_type != fsp2->flow_type ||
	    memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
	    memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
	    memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
	    memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
		return false;

	return true;
}

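/* Scan the filter list for a rule that duplicates @fsp at a different
 * location, reporting the conflict via netdev_err().
 */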
static bool __must_check
aq_rule_already_exists(struct aq_nic_s *aq_nic,
		       struct ethtool_rx_flow_spec *fsp)
{
	struct aq_rx_filter *rule;
	struct hlist_node *aq_node2;
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (rule->aq_fsp.location == fsp->location)
			continue;
		if (aq_match_filter(&rule->aq_fsp, fsp)) {
			netdev_err(aq_nic->ndev,
				   "ethtool: This filter is already set\n");
			return true;
		}
	}

	return false;
}

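/* Validate the location of an L3/L4 filter and reject mixing IPv4 and
 * IPv6 rules. IPv6 rules are restricted to two fixed locations,
 * apparently because each one occupies four consecutive IPv4 slots.
 */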
static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
				  struct aq_hw_rx_fltrs_s *rx_fltrs,
				  struct ethtool_rx_flow_spec *fsp)
{
	u32 last_location = AQ_RX_LAST_LOC_FL3L4 -
			    aq_nic->aq_hw_rx_fltrs.fl3l4.reserved_count;

	if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
	    fsp->location > last_location) {
		netdev_err(aq_nic->ndev,
			   "ethtool: location must be in range [%d, %d]",
			   AQ_RX_FIRST_LOC_FL3L4, last_location);
		return -EINVAL;
	}
	if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
		rx_fltrs->fl3l4.is_ipv6 = false;
		netdev_err(aq_nic->ndev,
			   "ethtool: mixing ipv4 and ipv6 is not allowed");
		return -EINVAL;
	} else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
		rx_fltrs->fl3l4.is_ipv6 = true;
		netdev_err(aq_nic->ndev,
			   "ethtool: mixing ipv4 and ipv6 is not allowed");
		return -EINVAL;
	} else if (rx_fltrs->fl3l4.is_ipv6		      &&
		   fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
		   fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified location for ipv6 must be %d or %d",
			   AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
		return -EINVAL;
	}

	return 0;
}

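/* Validate an L2 (EtherType) filter: the location must lie in the
 * EtherType range, and a rule matching on VLAN priority alone must
 * also specify an EtherType.
 */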
static int __must_check
aq_check_approve_fl2(struct aq_nic_s *aq_nic,
		     struct aq_hw_rx_fltrs_s *rx_fltrs,
		     struct ethtool_rx_flow_spec *fsp)
{
	u32 last_location = AQ_RX_LAST_LOC_FETHERT -
			    aq_nic->aq_hw_rx_fltrs.fet_reserved_count;

	if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
	    fsp->location > last_location) {
		netdev_err(aq_nic->ndev,
			   "ethtool: location must be in range [%d, %d]",
			   AQ_RX_FIRST_LOC_FETHERT,
			   last_location);
		return -EINVAL;
	}

	if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
	    fsp->m_u.ether_spec.h_proto == 0U) {
		netdev_err(aq_nic->ndev,
			   "ethtool: proto (ether_type) parameter must be specified");
		return -EINVAL;
	}

	return 0;
}

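/* Validate a VLAN filter: check the location range, require the VLAN
 * ID to be registered when hardware VLAN filtering is enabled, and
 * bound the target queue by the number of RSS queues.
 */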
static int __must_check
aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
		       struct aq_hw_rx_fltrs_s *rx_fltrs,
		       struct ethtool_rx_flow_spec *fsp)
{
	if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
	    fsp->location > AQ_RX_LAST_LOC_FVLANID) {
		netdev_err(aq_nic->ndev,
			   "ethtool: location must be in range [%d, %d]",
			   AQ_RX_FIRST_LOC_FVLANID,
			   AQ_RX_LAST_LOC_FVLANID);
		return -EINVAL;
	}

	if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK,
		       aq_nic->active_vlans))) {
		netdev_err(aq_nic->ndev,
			   "ethtool: unknown vlan-id specified");
		return -EINVAL;
	}

	if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
		netdev_err(aq_nic->ndev,
			   "ethtool: queue number must be in range [0, %d]",
			   aq_nic->aq_nic_cfg.num_rss_queues - 1);
		return -EINVAL;
	}
	return 0;
}

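/* Dispatch validation to the VLAN, EtherType or L3/L4 checker based on
 * the flow type and, for FLOW_EXT rules, on the VLAN TCI mask.
 */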
static int __must_check
aq_check_filter(struct aq_nic_s *aq_nic,
		struct ethtool_rx_flow_spec *fsp)
{
	int err = 0;
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);

	if (fsp->flow_type & FLOW_EXT) {
		if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
			err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
		} else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
			err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
		} else {
			netdev_err(aq_nic->ndev,
				   "ethtool: invalid vlan mask 0x%x specified",
				   be16_to_cpu(fsp->m_ext.vlan_tci));
			err = -EINVAL;
		}
	} else {
		switch (fsp->flow_type & ~FLOW_EXT) {
		case ETHER_FLOW:
			err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case IPV4_FLOW:
		case IP_USER_FLOW:
			rx_fltrs->fl3l4.is_ipv6 = false;
			err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
			break;
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case IPV6_FLOW:
		case IPV6_USER_FLOW:
			rx_fltrs->fl3l4.is_ipv6 = true;
			err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
			break;
		default:
			netdev_err(aq_nic->ndev,
				   "ethtool: unknown flow-type specified");
			err = -EINVAL;
		}
	}

	return err;
}

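/* Return true when the rule cannot be supported: ntuple filtering is
 * disabled, the flow type is not programmable, tos/tclass matching is
 * requested, or the rule uses FLOW_MAC_EXT.
 */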
static bool __must_check
aq_rule_is_not_support(struct aq_nic_s *aq_nic,
		       struct ethtool_rx_flow_spec *fsp)
{
	bool rule_is_not_support = false;

	if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
		netdev_err(aq_nic->ndev,
			   "ethtool: Please enable ntuple (RX flow) filtering first:\n"
			   "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
		rule_is_not_support = true;
	} else if (!aq_rule_is_approve(fsp)) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified flow type is not supported\n");
		rule_is_not_support = true;
	} else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
		   (fsp->h_u.tcp_ip4_spec.tos ||
		    fsp->h_u.tcp_ip6_spec.tclass)) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified tos/tclass is not supported\n");
		rule_is_not_support = true;
	} else if (fsp->flow_type & FLOW_MAC_EXT) {
		netdev_err(aq_nic->ndev,
			   "ethtool: MAC_EXT is not supported");
		rule_is_not_support = true;
	}

	return rule_is_not_support;
}

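/* Return true when the rule is malformed: out-of-range location, a
 * failed per-type check, or a target queue beyond the configured RSS
 * queues.
 */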
static bool __must_check
aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
		       struct ethtool_rx_flow_spec *fsp)
{
	bool rule_is_not_correct = false;

	if (!aq_nic) {
		rule_is_not_correct = true;
	} else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified rule location %u is invalid\n",
			   fsp->location);
		rule_is_not_correct = true;
	} else if (aq_check_filter(aq_nic, fsp)) {
		rule_is_not_correct = true;
	} else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
		if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
			netdev_err(aq_nic->ndev,
				   "ethtool: The specified action is invalid.\n"
				   "The maximum allowable action value is %u.\n",
				   aq_nic->aq_nic_cfg.num_rss_queues - 1);
			rule_is_not_correct = true;
		}
	}

	return rule_is_not_correct;
}

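/* Full validation pipeline for a new rule, distinguishing malformed
 * (-EINVAL), unsupported (-EOPNOTSUPP) and duplicate (-EEXIST) rules.
 */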
static int __must_check
aq_check_rule(struct aq_nic_s *aq_nic,
	      struct ethtool_rx_flow_spec *fsp)
{
	int err = 0;

	if (aq_rule_is_not_correct(aq_nic, fsp))
		err = -EINVAL;
	else if (aq_rule_is_not_support(aq_nic, fsp))
		err = -EOPNOTSUPP;
	else if (aq_rule_already_exists(aq_nic, fsp))
		err = -EEXIST;

	return err;
}

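/* Translate an ethtool EtherType rule into the hardware L2 filter
 * layout: zero-based location, target queue (-1 means discard),
 * EtherType and an optional user-priority match.
 */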
static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
			    struct aq_rx_filter *aq_rx_fltr,
			    struct aq_rx_filter_l2 *data, bool add)
{
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;

	memset(data, 0, sizeof(*data));

	data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;

	if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
		data->queue = fsp->ring_cookie;
	else
		data->queue = -1;

	data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
	data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
				 == VLAN_PRIO_MASK;
	data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
			       & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}

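/* Program or remove an EtherType filter via the hw_filter_l2_set() /
 * hw_filter_l2_clear() hooks, when the hardware provides them.
 */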
static int aq_add_del_fether(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr, bool add)
{
	struct aq_rx_filter_l2 data;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;

	aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);

	if (unlikely(!aq_hw_ops->hw_filter_l2_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
		return -EOPNOTSUPP;

	if (add)
		return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
	else
		return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
}

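/* Return true when @vlan is already served by an enabled filter with a
 * queue assigned.
 */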
static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
{
	int i;

	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		if (aq_vlans[i].enable &&
		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
		    aq_vlans[i].vlan_id == vlan) {
			return true;
		}
	}

	return false;
}

/* Rebuild the array of VLAN filters so that filters with an assigned
 * queue take precedence over plain VLANs registered on the interface.
 */
static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
			     unsigned long *active_vlans,
			     struct aq_rx_filter_vlan *aq_vlans)
{
	bool vlan_busy = false;
	int vlan = -1;
	int i;

	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		if (aq_vlans[i].enable &&
		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
			continue;
		do {
			vlan = find_next_bit(active_vlans,
					     VLAN_N_VID,
					     vlan + 1);
			if (vlan == VLAN_N_VID) {
				aq_vlans[i].enable = 0U;
				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
				aq_vlans[i].vlan_id = 0;
				continue;
			}

			vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
			if (!vlan_busy) {
				aq_vlans[i].enable = 1U;
				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
				aq_vlans[i].vlan_id = vlan;
			}
		} while (vlan_busy && vlan != VLAN_N_VID);
	}
}

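/* Fill the VLAN filter slot for @aq_rx_fltr. Any queue-less entry for
 * the same VLAN ID is disabled first so the rule with an explicit
 * queue takes its place.
 */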
static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr,
			     struct aq_rx_filter_vlan *aq_vlans, bool add)
{
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
	int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
	int i;

	memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));

	if (!add)
		return 0;

	/* remove vlan if it was in table without queue assignment */
	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		if (aq_vlans[i].vlan_id ==
		   (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
			aq_vlans[i].enable = false;
		}
	}

	aq_vlans[location].location = location;
	aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
				     & VLAN_VID_MASK;
	aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
	aq_vlans[location].enable = 1U;

	return 0;
}

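/* Remove the ethtool VLAN rule matching @vlan_id, e.g. when the VLAN
 * is unregistered from the interface; -ENOENT when no such rule
 * exists.
 */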
int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
			break;
	}
	if (rule && rule->type == aq_rx_filter_vlan &&
	    be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
		struct ethtool_rxnfc cmd;

		cmd.fs.location = rule->aq_fsp.location;
		return aq_del_rxnfc_rule(aq_nic, &cmd);
	}

	return -ENOENT;
}

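/* Update the VLAN filter table with @aq_rx_fltr and push the whole
 * table to hardware via aq_filters_vlans_update().
 */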
static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
			    struct aq_rx_filter *aq_rx_fltr, bool add)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;

	aq_set_data_fvlan(aq_nic,
			  aq_rx_fltr,
			  aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
			  add);

	return aq_filters_vlans_update(aq_nic);
}

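/* Translate an ethtool L3/L4 rule into the hardware command word and
 * address/port fields. Deletion only clears the matching
 * active_ipv4/active_ipv6 bit; IPv6 occupancy is tracked in units of
 * four locations.
 */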
static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr,
			     struct aq_rx_filter_l3l4 *data, bool add)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;

	memset(data, 0, sizeof(*data));

	data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
	data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);

	if (!add) {
		if (!data->is_ipv6)
			rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
		else
			rx_fltrs->fl3l4.active_ipv6 &=
				~BIT((data->location) / 4);

		return 0;
	}

	data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		data->cmd |= HW_ATL_RX_UDP;
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		data->cmd |= HW_ATL_RX_SCTP;
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	default:
		break;
	}

	if (!data->is_ipv6) {
		data->ip_src[0] =
			ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
		data->ip_dst[0] =
			ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
		rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
	} else {
		int i;

		rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
		for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
			data->ip_dst[i] =
				ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
			data->ip_src[i] =
				ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
		}
		data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
	}
	if (fsp->flow_type != IP_USER_FLOW &&
	    fsp->flow_type != IPV6_USER_FLOW) {
		if (!data->is_ipv6) {
			data->p_dst =
				ntohs(fsp->h_u.tcp_ip4_spec.pdst);
			data->p_src =
				ntohs(fsp->h_u.tcp_ip4_spec.psrc);
		} else {
			data->p_dst =
				ntohs(fsp->h_u.tcp_ip6_spec.pdst);
			data->p_src =
				ntohs(fsp->h_u.tcp_ip6_spec.psrc);
		}
	}
	if (data->ip_src[0] && !data->is_ipv6)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
	if (data->ip_dst[0] && !data->is_ipv6)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
	if (data->p_dst)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
	if (data->p_src)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
	if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
		data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
		data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
		data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
	} else {
		data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
	}

	return 0;
}

static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
			const struct aq_hw_ops *aq_hw_ops,
			struct aq_rx_filter_l3l4 *data)
{
	if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
		return -EOPNOTSUPP;

	return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
}

static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
			    struct aq_rx_filter *aq_rx_fltr, bool add)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	struct aq_rx_filter_l3l4 data;

	if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
		     aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4  ||
		     aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
		return -EINVAL;

	return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
}

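/* Classify @aq_rx_fltr as a VLAN, EtherType or L3/L4 rule and hand it
 * to the matching add/delete helper.
 */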
static int aq_add_del_rule(struct aq_nic_s *aq_nic,
			   struct aq_rx_filter *aq_rx_fltr, bool add)
{
	int err = -EINVAL;

	if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
		if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
		    == VLAN_VID_MASK) {
			aq_rx_fltr->type = aq_rx_filter_vlan;
			err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
		} else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
			== VLAN_PRIO_MASK) {
			aq_rx_fltr->type = aq_rx_filter_ethertype;
			err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
		}
	} else {
		switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
		case ETHER_FLOW:
			aq_rx_fltr->type = aq_rx_filter_ethertype;
			err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case IP_USER_FLOW:
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case IPV6_USER_FLOW:
			aq_rx_fltr->type = aq_rx_filter_l3l4;
			err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

	return err;
}

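/* Insert @aq_rx_fltr into the location-sorted filter list, first
 * unprogramming and freeing any rule already stored at @index. A NULL
 * @aq_rx_fltr performs the removal only.
 */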
static int aq_update_table_filters(struct aq_nic_s *aq_nic,
				   struct aq_rx_filter *aq_rx_fltr, u16 index,
				   struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL, *parent = NULL;
	struct hlist_node *aq_node2;
	int err = -EINVAL;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (rule->aq_fsp.location >= index)
			break;
		parent = rule;
	}

	if (rule && rule->aq_fsp.location == index) {
		err = aq_add_del_rule(aq_nic, rule, false);
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}

	if (unlikely(!aq_rx_fltr))
		return err;

	INIT_HLIST_NODE(&aq_rx_fltr->aq_node);

	if (parent)
		hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
	else
		hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);

	++rx_fltrs->active_filters;

	return 0;
}

u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);

	return rx_fltrs->active_filters;
}

struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
{
	return &aq_nic->aq_hw_rx_fltrs;
}

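/* ETHTOOL_SRXCLSRLINS handler, reached e.g. via
 * "ethtool -N <iface> flow-type ...": validate the rule, link it into
 * the filter list and program it into hardware.
 */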
int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct aq_rx_filter *aq_rx_fltr;
	int err = 0;

	err = aq_check_rule(aq_nic, fsp);
	if (err)
		goto err_exit;

	aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
	if (unlikely(!aq_rx_fltr)) {
		err = -ENOMEM;
		goto err_exit;
	}

	memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));

	err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
	if (unlikely(err))
		goto err_free;

	err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
	if (unlikely(err)) {
		hlist_del(&aq_rx_fltr->aq_node);
		--rx_fltrs->active_filters;
		goto err_free;
	}

	return 0;

err_free:
	kfree(aq_rx_fltr);
err_exit:
	return err;
}

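/* ETHTOOL_SRXCLSRLDEL handler: unprogram and free the rule stored at
 * cmd->fs.location, if any.
 */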
int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;
	int err = -EINVAL;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (rule->aq_fsp.location == cmd->fs.location)
			break;
	}

	if (rule && rule->aq_fsp.location == cmd->fs.location) {
		err = aq_add_del_rule(aq_nic, rule, false);
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}
	return err;
}

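/* ETHTOOL_GRXCLSRULE handler: copy the stored flow spec for the
 * requested location back to the caller.
 */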
int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct ethtool_rx_flow_spec *fsp =
			(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node)
		if (fsp->location <= rule->aq_fsp.location)
			break;

	if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
		return -EINVAL;

	memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));

	return 0;
}

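/* ETHTOOL_GRXCLSRLALL handler: report the active rule count and fill
 * @rule_locs with the location of every stored rule.
 */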
int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct hlist_node *aq_node2;
	struct aq_rx_filter *rule;
	int count = 0;

	cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (unlikely(count == cmd->rule_cnt))
			return -EMSGSIZE;

		rule_locs[count++] = rule->aq_fsp.location;
	}

	cmd->rule_cnt = count;

	return 0;
}

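/* Unprogram and free every stored rule. */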
int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct hlist_node *aq_node2;
	struct aq_rx_filter *rule;
	int err = 0;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		err = aq_add_del_rule(aq_nic, rule, false);
		if (err)
			goto err_exit;
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}

err_exit:
	return err;
}

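/* Reprogram every stored rule into hardware, presumably after a reset
 * has cleared the hardware filter tables.
 */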
int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct hlist_node *aq_node2;
	struct aq_rx_filter *rule;
	int err = 0;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		err = aq_add_del_rule(aq_nic, rule, true);
		if (err)
			goto err_exit;
	}

err_exit:
	return err;
}

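/* Rebuild the VLAN filter table from the active-VLAN bitmap and write
 * it to hardware. When more VLANs are active than hardware filters
 * exist, VLAN promiscuous mode is forced instead.
 */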
int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	int hweight = 0;
	int err = 0;
	int i;

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
		return -EOPNOTSUPP;

	aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
			 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);

	if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
		for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
			hweight += hweight_long(aq_nic->active_vlans[i]);

		err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
		if (err)
			return err;
	}

	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
	if (err)
		return err;

	if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
			err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
				!(aq_nic->packet_filter & IFF_PROMISC));
			aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
		} else {
			/* otherwise stay in VLAN promiscuous mode */
			aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
		}
	}

	return err;
}

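/* Disable hardware VLAN filtering: clear the active-VLAN bitmap, force
 * VLAN promiscuous mode and flush the emptied table to hardware.
 */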
int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	int err = 0;

	memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
	aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
			 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
		return -EOPNOTSUPP;

	aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
	err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
	if (err)
		return err;
	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
	return err;
}