/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)

struct mlx5_fc_cache {
	u64 packets;
	u64 bytes;
	u64 lastuse;
};

struct mlx5_fc {
	struct list_head list;
	struct llist_node addlist;
	struct llist_node dellist;

	/* last{packets,bytes} members are used when calculating the delta
	 * since the last reading
	 */
	u64 lastpackets;
	u64 lastbytes;

	u32 id;
	bool aging;

	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};

/* locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls or bad
 * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
 * to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
 *
 * access to counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be used by
 *     mlx5_fc_stats_work(). addlist is a lockless singly linked list
 *     that doesn't require any additional synchronization when adding a
 *     single node.
 *   - spawn thread to do the actual insertion into the counters list
 *
 * - destroy (user context)
 *   - add a counter to the lockless dellist
 *   - spawn thread to do the actual del
 *
 * - dump (user context)
 *   user should not call dump after destroy
 *
 * - query (single thread workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (since update is not
 *                atomic)
 *   query/create - no conflict (see create)
 *   since every create/destroy spawns the work, the thread will actually
 *   query the hardware only after the necessary time has elapsed.
 */

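/* Illustrative sketch (not part of this file): the caller pattern that the
 * locking scheme above expects. @dev, @bytes, @packets and @lastuse are
 * assumed caller-side variables; the flow rule wiring is elided.
 *
 *	struct mlx5_fc *counter;
 *	u64 bytes, packets, lastuse;
 *
 *	counter = mlx5_fc_create(dev, true);	// aging counter
 *	if (IS_ERR(counter))			// never returns NULL
 *		return PTR_ERR(counter);
 *
 *	// ... attach mlx5_fc_id(counter) to a flow rule, then periodically:
 *	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 *
 *	// never dump after this point:
 *	mlx5_fc_destroy(dev, counter);
 */

/* Find where a counter with the given id belongs in the id-sorted counters
 * list: return the node of the first counter with a larger id (so the caller
 * can insert before it), or the list head itself if the id is the largest.
 * Counters already published in the IDR but not yet spliced into the list
 * are skipped.
 */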
static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
						      u32 id)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	unsigned long next_id = (unsigned long)id + 1;
	struct mlx5_fc *counter;

	rcu_read_lock();
	/* skip counters that are in idr, but not yet in counters list */
	while ((counter = idr_get_next_ul(&fc_stats->counters_idr,
					  &next_id)) != NULL &&
	       list_empty(&counter->list))
		next_id++;
	rcu_read_unlock();

	return counter ? &counter->list : &fc_stats->counters;
}

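/* Splice a new counter into the counters list, keeping the list sorted by
 * counter id so the periodic query can walk contiguous id ranges in bulk.
 */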
static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
				 struct mlx5_fc *counter)
{
	struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);

	list_add_tail(&counter->list, next);
}

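/* Unlink a counter from the counters list and the IDR. Runs only in the
 * workqueue context, after which the counter memory may be freed.
 */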
static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
				 struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	list_del(&counter->list);

	spin_lock(&fc_stats->counters_idr_lock);
	WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
	spin_unlock(&fc_stats->counters_idr_lock);
}

/* Query one bulk of counters starting at @first and update their cached
 * values. Returns the first counter that did not fit into this bulk, so the
 * caller can continue calling with it until all counters up to @last_id have
 * been queried; returns NULL when done.
 */
static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
					   struct mlx5_fc *first,
					   u32 last_id)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter = NULL;
	struct mlx5_cmd_fc_bulk *b;
	bool more = false;
	u32 afirst_id;
	int num;
	int err;
	int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
			     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));

	/* first id must be aligned to 4 when using bulk query */
	afirst_id = first->id & ~0x3;

	/* number of counters to query, including the last counter; e.g. for
	 * first->id == 10 and last_id == 13 this queries ids [8, 15]
	 */
	num = ALIGN(last_id - afirst_id + 1, 4);
	if (num > max_bulk) {
		num = max_bulk;
		last_id = afirst_id + num - 1;
	}

	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
	if (!b) {
		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
		return NULL;
	}

	err = mlx5_cmd_fc_bulk_query(dev, b);
	if (err) {
		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
		goto out;
	}

	counter = first;
	list_for_each_entry_from(counter, &fc_stats->counters, list) {
		struct mlx5_fc_cache *c = &counter->cache;
		u64 packets;
		u64 bytes;

		if (counter->id > last_id) {
			more = true;
			break;
		}

		mlx5_cmd_fc_bulk_get(dev, b,
				     counter->id, &packets, &bytes);

		if (c->packets == packets)
			continue;

		c->packets = packets;
		c->bytes = bytes;
		c->lastuse = jiffies;
	}

out:
	mlx5_cmd_fc_bulk_free(b);

	return more ? counter : NULL;
}

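/* Release the hardware flow counter and free the host-side object. */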
static void mlx5_free_fc(struct mlx5_core_dev *dev,
			 struct mlx5_fc *counter)
{
	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}

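/* Periodic work: drain the add/del llists into the sorted counters list,
 * free deleted counters, and, once the sampling interval has elapsed, walk
 * the whole list in bulk-query chunks to refresh the cached values.
 */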
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	/* Take dellist first to ensure that counters cannot be deleted before
	 * they are inserted.
	 */
	struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
	struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
	struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
	unsigned long now = jiffies;

	if (addlist || !list_empty(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   fc_stats->sampling_interval);

	llist_for_each_entry(counter, addlist, addlist)
		mlx5_fc_stats_insert(dev, counter);

	llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
		mlx5_fc_stats_remove(dev, counter);

		mlx5_free_fc(dev, counter);
	}

	if (time_before(now, fc_stats->next_query) ||
	    list_empty(&fc_stats->counters))
		return;

	last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);

	counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
				   list);
	while (counter)
		counter = mlx5_fc_stats_query(dev, counter, last->id);

	fc_stats->next_query = now + fc_stats->sampling_interval;
}

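/* Allocate a hardware flow counter. When @aging is true the counter is also
 * published to the IDR and the add llist so the stats work samples it
 * periodically; non-aging counters are only read on demand via
 * mlx5_fc_query().
 */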
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&counter->list);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err)
		goto err_out;

	if (aging) {
		u32 id = counter->id;

		counter->cache.lastuse = jiffies;
		counter->aging = true;

		idr_preload(GFP_KERNEL);
		spin_lock(&fc_stats->counters_idr_lock);

		err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
				    GFP_NOWAIT);

		spin_unlock(&fc_stats->counters_idr_lock);
		idr_preload_end();
		if (err)
			goto err_out_alloc;

		llist_add(&counter->addlist, &fc_stats->addlist);

		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	}

	return counter;

err_out_alloc:
	mlx5_cmd_fc_free(dev, counter->id);
err_out:
	kfree(counter);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);

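/* Return the hardware id of the counter. */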
u32 mlx5_fc_id(struct mlx5_fc *counter)
{
	return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);

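/* Destroy a counter. Aging counters are handed to the workqueue through the
 * del llist (so a still-pending insertion is always processed first);
 * non-aging counters are freed immediately.
 */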
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		llist_add(&counter->dellist, &fc_stats->dellist);
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_free_fc(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);

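/* Initialize the per-device counter bookkeeping and create the
 * single-threaded workqueue used for sampling.
 */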
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	spin_lock_init(&fc_stats->counters_idr_lock);
	idr_init(&fc_stats->counters_idr);
	INIT_LIST_HEAD(&fc_stats->counters);
	init_llist_head(&fc_stats->addlist);
	init_llist_head(&fc_stats->dellist);

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		return -ENOMEM;

	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	return 0;
}

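/* Tear down the stats machinery: stop the work and its queue, then free any
 * counters still pending on the add llist or left on the main list.
 */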
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct llist_node *tmplist;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	idr_destroy(&fc_stats->counters_idr);

	tmplist = llist_del_all(&fc_stats->addlist);
	llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
		mlx5_free_fc(dev, counter);

	list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
		mlx5_free_fc(dev, counter);
}

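/* Read current totals for a single counter directly from hardware,
 * bypassing the cache.
 */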
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes)
{
	return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);

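/* Return the bytes/packets delta since the previous call, computed from the
 * cached values, along with the last time traffic was seen. The cache copy
 * is not atomic with respect to the sampling work, so bytes and packets may
 * be momentarily inconsistent (see the locking scheme above).
 */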
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}

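/* Queue caller-provided delayed work on the counters workqueue; since the
 * queue is single-threaded, the work runs serialized with the sampling work.
 */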
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
			      struct delayed_work *dwork,
			      unsigned long delay)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	queue_delayed_work(fc_stats->wq, dwork, delay);
}

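/* Lower the sampling interval to at most @interval; the interval can only
 * shrink, never grow, so the fastest requester wins.
 */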
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
				      unsigned long interval)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->sampling_interval = min_t(unsigned long, interval,
					    fc_stats->sampling_interval);
}