// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020, Intel Corporation. */

#include "ice.h"

/**
 * ice_is_arfs_active - helper to check if aRFS is active
 * @vsi: VSI to check
 */
static bool ice_is_arfs_active(struct ice_vsi *vsi)
{
	return !!vsi->arfs_fltr_list;
}

/**
 * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
 * @hw: pointer to the HW structure
 * @flow_type: flow type as Flow Director understands it
 *
 * Flow Director will query this function to see if aRFS is currently using
 * the specified flow_type for perfect (4-tuple) filters.
 */
bool
ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
{
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	struct ice_pf *pf = hw->back;
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;

	/* active counters can be updated by multiple CPUs */
	smp_mb__before_atomic();
	switch (flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
	default:
		return false;
	}
}

/**
 * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
 * @vsi: VSI that aRFS is active on
 * @entry: aRFS entry used to change counters
 * @add: true to increment counter, false to decrement
 */
static void
ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
				  struct ice_arfs_entry *entry, bool add)
{
	struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;

	switch (entry->fltr_info.flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv6_cnt);
		break;
	default:
		dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
			entry->fltr_info.flow_type);
	}
}

/**
 * ice_arfs_del_flow_rules - delete the rules passed in from HW
 * @vsi: VSI for the flow rules that need to be deleted
 * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
 *
 * Loop through the delete list passed in and remove the rules from HW. After
 * each rule is deleted, disconnect and free the ice_arfs_entry because it is no
 * longer being referenced by the aRFS hash table.
 */
static void
ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, e, false);
		else
			dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, e->fltr_state, e->fltr_info.fltr_id,
				e->flow_id, e->fltr_info.q_index);

		/* The aRFS hash table is no longer referencing this entry */
		hlist_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_arfs_add_flow_rules - add the rules passed in to HW
 * @vsi: VSI for the flow rules that need to be added
 * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
 *
 * Loop through the add list passed in and add the rules to HW. After each
 * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
 * the ice_arfs_entry(s) because they are still being referenced in the aRFS
 * hash table.
 */
static void
ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
{
	struct ice_arfs_entry_ptr *ep;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back,
					     &ep->arfs_entry->fltr_info, true,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
							  true);
		else
			dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, ep->arfs_entry->fltr_state,
				ep->arfs_entry->fltr_info.fltr_id,
				ep->arfs_entry->flow_id,
				ep->arfs_entry->fltr_info.q_index);

		hlist_del(&ep->list_entry);
		devm_kfree(dev, ep);
	}
}

/**
 * ice_arfs_is_flow_expired - check if the aRFS entry has expired
 * @vsi: VSI containing the aRFS entry
 * @arfs_entry: aRFS entry that's being checked for expiration
 *
 * Return true if the flow has expired, else false. This function should be
 * used to determine whether an aRFS entry should be removed from the hardware
 * and software structures.
 */
static bool
ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
{
#define ICE_ARFS_TIME_DELTA_EXPIRATION	msecs_to_jiffies(5000)
	if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
				arfs_entry->flow_id,
				arfs_entry->fltr_info.fltr_id))
		return true;

	/* expiration timer only used for UDP filters */
	if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
	    arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		return false;

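	/* time_in_range64(a, b, c) tests b <= a <= c, so this evaluates true
	 * once at least ICE_ARFS_TIME_DELTA_EXPIRATION jiffies have elapsed
	 * since the flow was activated, in a wrap-safe way
	 */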
	return time_in_range64(arfs_entry->time_activated +
			       ICE_ARFS_TIME_DELTA_EXPIRATION,
			       arfs_entry->time_activated, get_jiffies_64());
}

/**
 * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
 * @vsi: the VSI to be forwarded to
 * @idx: index into the table of aRFS filter lists. Obtained from skb->hash
 * @add_list: list to populate with filters to be added to Flow Director
 * @del_list: list to populate with filters to be deleted from Flow Director
 *
 * Iterate over the hlist at the index given in the aRFS hash table and
 * determine if there are any aRFS entries that need to be either added or
 * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the
 * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and
 * the flow has expired delete the filter from HW. The caller of this function
 * is expected to add/delete rules on the add_list/del_list respectively.
 */
static void
ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
			   struct hlist_head *add_list,
			   struct hlist_head *del_list)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

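	/* the caller holds the VSI's arfs_lock (a BH spinlock), so any
	 * allocation done while walking the bucket must be atomic
	 */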
	/* go through the aRFS hlist at this idx and check for needed updates */
	hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
		/* check if filter needs to be added to HW */
		if (e->fltr_state == ICE_ARFS_INACTIVE) {
			enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
			struct ice_arfs_entry_ptr *ep =
				devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);

			if (!ep)
				continue;
			INIT_HLIST_NODE(&ep->list_entry);
			/* reference aRFS entry to add HW filter */
			ep->arfs_entry = e;
			hlist_add_head(&ep->list_entry, add_list);
			e->fltr_state = ICE_ARFS_ACTIVE;
			/* expiration timer only used for UDP flows */
			if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
				e->time_activated = get_jiffies_64();
		} else if (e->fltr_state == ICE_ARFS_ACTIVE) {
			/* check if filter needs to be removed from HW */
			if (ice_arfs_is_flow_expired(vsi, e)) {
				/* remove aRFS entry from hash table for delete
				 * and to prevent referencing it the next time
				 * through this hlist index
				 */
				hlist_del(&e->list_entry);
				e->fltr_state = ICE_ARFS_TODEL;
				/* save reference to aRFS entry for delete */
				hlist_add_head(&e->list_entry, del_list);
			}
		}
}

/**
 * ice_sync_arfs_fltrs - update all aRFS filters
 * @pf: board private structure
 */
void ice_sync_arfs_fltrs(struct ice_pf *pf)
{
	HLIST_HEAD(tmp_del_list);
	HLIST_HEAD(tmp_add_list);
	struct ice_vsi *pf_vsi;
	unsigned int i;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	if (!ice_is_arfs_active(pf_vsi))
		return;

	spin_lock_bh(&pf_vsi->arfs_lock);
	/* once we process aRFS for the PF VSI, we're done */
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
					   &tmp_del_list);
	spin_unlock_bh(&pf_vsi->arfs_lock);

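	/* HW programming happens outside the lock: the entries on the delete
	 * list were unhooked from the hash table above, and the add list holds
	 * only ice_arfs_entry_ptr nodes private to this context
	 */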
	/* use list of ice_arfs_entry(s) for delete */
	ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);

	/* use list of ice_arfs_entry_ptr(s) for add */
	ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
}

/**
 * ice_arfs_build_entry - builds an aRFS entry based on input
 * @vsi: destination VSI for this flow
 * @fk: flow dissector keys for creating the tuple
 * @rxq_idx: Rx queue to steer this flow to
 * @flow_id: passed down from the stack and saved for flow expiration
 *
 * Returns an aRFS entry on success and NULL on failure.
 */
static struct ice_arfs_entry *
ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
		     u16 rxq_idx, u32 flow_id)
{
	struct ice_arfs_entry *arfs_entry;
	struct ice_fdir_fltr *fltr_info;
	u8 ip_proto;

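	/* called from ice_rx_flow_steer() with the aRFS spinlock held, so
	 * the allocation must not sleep
	 */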
	arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
				  sizeof(*arfs_entry),
				  GFP_ATOMIC | __GFP_NOWARN);
	if (!arfs_entry)
		return NULL;

	fltr_info = &arfs_entry->fltr_info;
	fltr_info->q_index = rxq_idx;
	fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
	fltr_info->dest_vsi = vsi->idx;
	ip_proto = fk->basic.ip_proto;

	if (fk->basic.n_proto == htons(ETH_P_IP)) {
		fltr_info->ip.v4.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV4_TCP :
			ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
		fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
		fltr_info->ip.v4.src_port = fk->ports.src;
		fltr_info->ip.v4.dst_port = fk->ports.dst;
	} else { /* ETH_P_IPV6 */
		fltr_info->ip.v6.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV6_TCP :
			ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
		fltr_info->ip.v6.src_port = fk->ports.src;
		fltr_info->ip.v6.dst_port = fk->ports.dst;
	}

	arfs_entry->flow_id = flow_id;
	fltr_info->fltr_id =
		atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;
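	/* the modulo keeps generated IDs below RPS_NO_FILTER, the sentinel the
	 * RPS core uses for "no filter installed", so a valid ID never aliases
	 * it when handed back via ndo_rx_flow_steer()/rps_may_expire_flow()
	 */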

	return arfs_entry;
}

/**
 * ice_arfs_is_perfect_flow_set - Check to see if perfect flow is set
 * @hw: pointer to HW structure
 * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
 * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
 *
 * We only support perfect (4-tuple) filters for aRFS. This function allows aRFS
 * to check if perfect (4-tuple) flow rules are currently in place by Flow
 * Director.
 */
static bool
ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
{
	unsigned long *perfect_fltr = hw->fdir_perfect_fltr;

	/* advanced Flow Director disabled, perfect filters always supported */
	if (!perfect_fltr)
		return true;

	if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);

	return false;
}

/**
 * ice_arfs_cmp - Check if aRFS filter matches this flow.
 * @fltr_info: filter info of the saved ARFS entry.
 * @fk: flow dissector keys.
 * @n_proto: One of htons(ETH_P_IP) or htons(ETH_P_IPV6).
 * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP.
 *
 * Since this function assumes limited values for n_proto and ip_proto, it
 * is meant to be called only from ice_rx_flow_steer().
 *
 * Return:
 * * true - fltr_info refers to the same flow as fk.
 * * false - fltr_info and fk refer to different flows.
 */
static bool
ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk,
	     __be16 n_proto, u8 ip_proto)
{
	/* Determine if the filter is for IPv4 or IPv6 based on flow_type,
	 * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}.
	 */
	bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
		     fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP;

	/* The checks below are ordered so the quickest and most discriminative
	 * fields are compared first, allowing a mismatch to fail early.
	 */
	if (is_v4)
		return n_proto == htons(ETH_P_IP) &&
		       fltr_info->ip.v4.src_port == fk->ports.src &&
		       fltr_info->ip.v4.dst_port == fk->ports.dst &&
		       fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src &&
		       fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst &&
		       fltr_info->ip.v4.proto == ip_proto;

	return fltr_info->ip.v6.src_port == fk->ports.src &&
	       fltr_info->ip.v6.dst_port == fk->ports.dst &&
	       fltr_info->ip.v6.proto == ip_proto &&
	       !memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr)) &&
	       !memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
}

/**
 * ice_rx_flow_steer - steer the Rx flow to where application is being run
 * @netdev: ptr to the netdev being adjusted
 * @skb: buffer with required header information
 * @rxq_idx: queue to which the flow needs to move
 * @flow_id: flow identifier provided by the netdev
 *
 * Based on the skb, rxq_idx, and flow_id passed in, add/update an entry in the
 * aRFS hash table. Iterate over one of the hlists in the aRFS hash table and
 * if the flow_id already exists in the hash table but the rxq_idx has changed,
 * mark the entry as ICE_ARFS_INACTIVE so it can get updated in HW, else if the
 * entry is marked as ICE_ARFS_TODEL delete it from the aRFS hash table. If
 * neither of the previous conditions is true, add a new entry in the aRFS hash
 * table, which gets set to ICE_ARFS_INACTIVE by default so it can be added to
 * HW.
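 *
 * Return: the filter ID of the matched or newly built entry on success, or a
 * negative errno on failure. The RPS core hands this ID back through
 * rps_may_expire_flow() when checking whether the flow has expired.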
 */
int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
		  u16 rxq_idx, u32 flow_id)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_arfs_entry *arfs_entry;
	struct ice_vsi *vsi = np->vsi;
	struct flow_keys fk;
	struct ice_pf *pf;
	__be16 n_proto;
	u8 ip_proto;
	u16 idx;
	int ret;

	/* aRFS memory was never allocated for this VSI, so bail out instead
	 * of dereferencing a NULL filter table
	 */
	if (unlikely(!vsi->arfs_fltr_list))
		return -ENODEV;

	pf = vsi->back;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	n_proto = fk.basic.n_proto;
	/* Support only IPV4 and IPV6 */
	if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
	    n_proto == htons(ETH_P_IPV6))
		ip_proto = fk.basic.ip_proto;
	else
		return -EPROTONOSUPPORT;

	/* Support only TCP and UDP */
	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	/* only support 4-tuple filters for aRFS */
	if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
		return -EOPNOTSUPP;

	/* choose the aRFS list bucket based on skb hash */
	idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;
	/* search for entry in the bucket */
	spin_lock_bh(&vsi->arfs_lock);
	hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
			     list_entry) {
		struct ice_fdir_fltr *fltr_info;

		/* keep searching for the already existing arfs_entry flow */
		if (arfs_entry->flow_id != flow_id)
			continue;

		fltr_info = &arfs_entry->fltr_info;

		if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto))
			continue;

		ret = fltr_info->fltr_id;

		if (fltr_info->q_index == rxq_idx ||
		    arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
			goto out;

		/* update the queue to forward to on an already existing flow */
		fltr_info->q_index = rxq_idx;
		arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
		ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
		goto out_schedule_service_task;
	}

	arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
	if (!arfs_entry) {
		ret = -ENOMEM;
		goto out;
	}

	ret = arfs_entry->fltr_info.fltr_id;
	INIT_HLIST_NODE(&arfs_entry->list_entry);
	hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
out_schedule_service_task:
	ice_service_task_schedule(pf);
out:
	spin_unlock_bh(&vsi->arfs_lock);
	return ret;
}

/**
 * ice_init_arfs_cntrs - initialize aRFS counter values
 * @vsi: VSI that aRFS counters need to be initialized on
 */
static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
{
	if (!vsi || vsi->type != ICE_VSI_PF)
		return -EINVAL;

	vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
				       GFP_KERNEL);
	if (!vsi->arfs_fltr_cntrs)
		return -ENOMEM;

	vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
					 GFP_KERNEL);
	if (!vsi->arfs_last_fltr_id) {
		kfree(vsi->arfs_fltr_cntrs);
		vsi->arfs_fltr_cntrs = NULL;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_init_arfs - initialize aRFS resources
 * @vsi: the VSI to be forwarded to
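 *
 * Failure here is not fatal: the function returns void, and a VSI whose
 * allocations failed simply runs without aRFS, since ice_is_arfs_active()
 * reports false while arfs_fltr_list is NULL.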
 */
void ice_init_arfs(struct ice_vsi *vsi)
{
	struct hlist_head *arfs_fltr_list;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
		return;

	arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
				 GFP_KERNEL);
	if (!arfs_fltr_list)
		return;

	if (ice_init_arfs_cntrs(vsi))
		goto free_arfs_fltr_list;

	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		INIT_HLIST_HEAD(&arfs_fltr_list[i]);

	spin_lock_init(&vsi->arfs_lock);

	vsi->arfs_fltr_list = arfs_fltr_list;

	return;

free_arfs_fltr_list:
	kfree(arfs_fltr_list);
}

/**
 * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
 * @vsi: the VSI whose aRFS resources are being cleaned up
 */
void ice_clear_arfs(struct ice_vsi *vsi)
{
	struct device *dev;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
	    !vsi->arfs_fltr_list)
		return;

	dev = ice_pf_to_dev(vsi->back);
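	/* the lock is taken and released per bucket, keeping hold times short
	 * so a concurrent ice_rx_flow_steer() is never blocked for the whole
	 * teardown
	 */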
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
		struct ice_arfs_entry *r;
		struct hlist_node *n;

		spin_lock_bh(&vsi->arfs_lock);
		hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
					  list_entry) {
			hlist_del(&r->list_entry);
			devm_kfree(dev, r);
		}
		spin_unlock_bh(&vsi->arfs_lock);
	}

	kfree(vsi->arfs_fltr_list);
	vsi->arfs_fltr_list = NULL;
	kfree(vsi->arfs_last_fltr_id);
	vsi->arfs_last_fltr_id = NULL;
	kfree(vsi->arfs_fltr_cntrs);
	vsi->arfs_fltr_cntrs = NULL;
}

/**
 * ice_free_cpu_rx_rmap - free the previously set up CPU reverse map
 * @vsi: the VSI to be forwarded to
 */
void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return;

	netdev = vsi->netdev;
	if (!netdev || !netdev->rx_cpu_rmap)
		return;

	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
}

/**
 * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
 * @vsi: the VSI to be forwarded to
 */
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;
	struct ice_pf *pf;
	int i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return 0;

	pf = vsi->back;
	netdev = vsi->netdev;
	if (!pf || !netdev || !vsi->num_q_vectors)
		return -EINVAL;

	netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
		   vsi->type, netdev->name, vsi->num_q_vectors);

	netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
	if (unlikely(!netdev->rx_cpu_rmap))
		return -EINVAL;

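	/* register each queue vector's Linux IRQ with the reverse map; the
	 * RFS core uses the IRQ affinity recorded there to find the queue
	 * whose interrupt lands on the CPU consuming the flow
	 */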
	ice_for_each_q_vector(vsi, i)
		if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				     vsi->q_vectors[i]->irq.virq)) {
			ice_free_cpu_rx_rmap(vsi);
			return -EINVAL;
		}

	return 0;
}

/**
 * ice_remove_arfs - remove/clear all aRFS resources
 * @pf: device private structure
 */
void ice_remove_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_clear_arfs(pf_vsi);
}

/**
 * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
 * @pf: device private structure
 */
void ice_rebuild_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_remove_arfs(pf);
	ice_init_arfs(pf_vsi);
}