143a335e0SAmir Vadai /*
243a335e0SAmir Vadai  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
343a335e0SAmir Vadai  *
443a335e0SAmir Vadai  * This software is available to you under a choice of one of two
543a335e0SAmir Vadai  * licenses.  You may choose to be licensed under the terms of the GNU
643a335e0SAmir Vadai  * General Public License (GPL) Version 2, available from the file
743a335e0SAmir Vadai  * COPYING in the main directory of this source tree, or the
843a335e0SAmir Vadai  * OpenIB.org BSD license below:
943a335e0SAmir Vadai  *
1043a335e0SAmir Vadai  *     Redistribution and use in source and binary forms, with or
1143a335e0SAmir Vadai  *     without modification, are permitted provided that the following
1243a335e0SAmir Vadai  *     conditions are met:
1343a335e0SAmir Vadai  *
1443a335e0SAmir Vadai  *      - Redistributions of source code must retain the above
1543a335e0SAmir Vadai  *        copyright notice, this list of conditions and the following
1643a335e0SAmir Vadai  *        disclaimer.
1743a335e0SAmir Vadai  *
1843a335e0SAmir Vadai  *      - Redistributions in binary form must reproduce the above
1943a335e0SAmir Vadai  *        copyright notice, this list of conditions and the following
2043a335e0SAmir Vadai  *        disclaimer in the documentation and/or other materials
2143a335e0SAmir Vadai  *        provided with the distribution.
2243a335e0SAmir Vadai  *
2343a335e0SAmir Vadai  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2443a335e0SAmir Vadai  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
2543a335e0SAmir Vadai  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2643a335e0SAmir Vadai  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
2743a335e0SAmir Vadai  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
2843a335e0SAmir Vadai  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
2943a335e0SAmir Vadai  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
3043a335e0SAmir Vadai  * SOFTWARE.
3143a335e0SAmir Vadai  */
3243a335e0SAmir Vadai 
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
3943a335e0SAmir Vadai 
4043a335e0SAmir Vadai #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
41a8ffcc74SRabie Loulou /* Max number of counters to query in bulk read is 32K */
42a8ffcc74SRabie Loulou #define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
43558101f1SGavi Teitz #define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
44558101f1SGavi Teitz #define MLX5_FC_POOL_USED_BUFF_RATIO 10
4543a335e0SAmir Vadai 
467300375fSSaeed Mahameed struct mlx5_fc_cache {
477300375fSSaeed Mahameed 	u64 packets;
487300375fSSaeed Mahameed 	u64 bytes;
497300375fSSaeed Mahameed 	u64 lastuse;
507300375fSSaeed Mahameed };
517300375fSSaeed Mahameed 
527300375fSSaeed Mahameed struct mlx5_fc {
537300375fSSaeed Mahameed 	struct list_head list;
547300375fSSaeed Mahameed 	struct llist_node addlist;
557300375fSSaeed Mahameed 	struct llist_node dellist;
567300375fSSaeed Mahameed 
577300375fSSaeed Mahameed 	/* last{packets,bytes} members are used when calculating the delta since
587300375fSSaeed Mahameed 	 * last reading
597300375fSSaeed Mahameed 	 */
607300375fSSaeed Mahameed 	u64 lastpackets;
617300375fSSaeed Mahameed 	u64 lastbytes;
627300375fSSaeed Mahameed 
635d8a0253SGavi Teitz 	struct mlx5_fc_bulk *bulk;
647300375fSSaeed Mahameed 	u32 id;
657300375fSSaeed Mahameed 	bool aging;
667300375fSSaeed Mahameed 
677300375fSSaeed Mahameed 	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
687300375fSSaeed Mahameed };
697300375fSSaeed Mahameed 
70558101f1SGavi Teitz static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
71558101f1SGavi Teitz static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
72558101f1SGavi Teitz static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
73558101f1SGavi Teitz static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
74558101f1SGavi Teitz 
7543a335e0SAmir Vadai /* locking scheme:
7643a335e0SAmir Vadai  *
7743a335e0SAmir Vadai  * It is the responsibility of the user to prevent concurrent calls or bad
7843a335e0SAmir Vadai  * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
7943a335e0SAmir Vadai  * to struct mlx5_fc.
8043a335e0SAmir Vadai  * e.g en_tc.c is protected by RTNL lock of its caller, and will never call a
8143a335e0SAmir Vadai  * dump (access to struct mlx5_fc) after a counter is destroyed.
8243a335e0SAmir Vadai  *
8343a335e0SAmir Vadai  * access to counter list:
8443a335e0SAmir Vadai  * - create (user context)
8543a335e0SAmir Vadai  *   - mlx5_fc_create() only adds to an addlist to be used by
866f06e04bSGavi Teitz  *     mlx5_fc_stats_work(). addlist is a lockless single linked list
8783033688SVlad Buslov  *     that doesn't require any additional synchronization when adding single
8883033688SVlad Buslov  *     node.
8943a335e0SAmir Vadai  *   - spawn thread to do the actual destroy
9043a335e0SAmir Vadai  *
9143a335e0SAmir Vadai  * - destroy (user context)
926e5e2283SVlad Buslov  *   - add a counter to lockless dellist
9343a335e0SAmir Vadai  *   - spawn thread to do the actual del
9443a335e0SAmir Vadai  *
9543a335e0SAmir Vadai  * - dump (user context)
9643a335e0SAmir Vadai  *   user should not call dump after destroy
9743a335e0SAmir Vadai  *
9843a335e0SAmir Vadai  * - query (single thread workqueue context)
9943a335e0SAmir Vadai  *   destroy/dump - no conflict (see destroy)
10043a335e0SAmir Vadai  *   query/dump - packets and bytes might be inconsistent (since update is not
10143a335e0SAmir Vadai  *                atomic)
10243a335e0SAmir Vadai  *   query/create - no conflict (see create)
10343a335e0SAmir Vadai  *   since every create/destroy spawn the work, only after necessary time has
10443a335e0SAmir Vadai  *   elapsed, the thread will actually query the hardware.
10543a335e0SAmir Vadai  */
10643a335e0SAmir Vadai 
1079aff93d7SVlad Buslov static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
1089aff93d7SVlad Buslov 						      u32 id)
10929cc6679SAmir Vadai {
1109aff93d7SVlad Buslov 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
11112d6066cSVlad Buslov 	unsigned long next_id = (unsigned long)id + 1;
1129aff93d7SVlad Buslov 	struct mlx5_fc *counter;
113d39d7149SCong Wang 	unsigned long tmp;
11429cc6679SAmir Vadai 
11512d6066cSVlad Buslov 	rcu_read_lock();
11612d6066cSVlad Buslov 	/* skip counters that are in idr, but not yet in counters list */
117d39d7149SCong Wang 	idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
118d39d7149SCong Wang 				       counter, tmp, next_id) {
119d39d7149SCong Wang 		if (!list_empty(&counter->list))
120d39d7149SCong Wang 			break;
121d39d7149SCong Wang 	}
12212d6066cSVlad Buslov 	rcu_read_unlock();
12329cc6679SAmir Vadai 
12412d6066cSVlad Buslov 	return counter ? &counter->list : &fc_stats->counters;
12529cc6679SAmir Vadai }
12629cc6679SAmir Vadai 
1279aff93d7SVlad Buslov static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
1289aff93d7SVlad Buslov 				 struct mlx5_fc *counter)
1299aff93d7SVlad Buslov {
1309aff93d7SVlad Buslov 	struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);
1319aff93d7SVlad Buslov 
1329aff93d7SVlad Buslov 	list_add_tail(&counter->list, next);
13329cc6679SAmir Vadai }
13429cc6679SAmir Vadai 
1352a4c4298SVlad Buslov static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
1362a4c4298SVlad Buslov 				 struct mlx5_fc *counter)
1372a4c4298SVlad Buslov {
1382a4c4298SVlad Buslov 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
1392a4c4298SVlad Buslov 
1402a4c4298SVlad Buslov 	list_del(&counter->list);
1412a4c4298SVlad Buslov 
1422a4c4298SVlad Buslov 	spin_lock(&fc_stats->counters_idr_lock);
1432a4c4298SVlad Buslov 	WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
1442a4c4298SVlad Buslov 	spin_unlock(&fc_stats->counters_idr_lock);
1452a4c4298SVlad Buslov }
1462a4c4298SVlad Buslov 
1476f06e04bSGavi Teitz static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
1486f06e04bSGavi Teitz {
1496f06e04bSGavi Teitz 	return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
1506f06e04bSGavi Teitz 			  (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
1516f06e04bSGavi Teitz }
1526f06e04bSGavi Teitz 
1536f06e04bSGavi Teitz static void update_counter_cache(int index, u32 *bulk_raw_data,
1546f06e04bSGavi Teitz 				 struct mlx5_fc_cache *cache)
1556f06e04bSGavi Teitz {
1566f06e04bSGavi Teitz 	void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
1576f06e04bSGavi Teitz 			     flow_statistics[index]);
1586f06e04bSGavi Teitz 	u64 packets = MLX5_GET64(traffic_counter, stats, packets);
1596f06e04bSGavi Teitz 	u64 bytes = MLX5_GET64(traffic_counter, stats, octets);
1606f06e04bSGavi Teitz 
1616f06e04bSGavi Teitz 	if (cache->packets == packets)
1626f06e04bSGavi Teitz 		return;
1636f06e04bSGavi Teitz 
1646f06e04bSGavi Teitz 	cache->packets = packets;
1656f06e04bSGavi Teitz 	cache->bytes = bytes;
1666f06e04bSGavi Teitz 	cache->lastuse = jiffies;
1676f06e04bSGavi Teitz }
1686f06e04bSGavi Teitz 
1696f06e04bSGavi Teitz static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
170a351a1b0SAmir Vadai 					      struct mlx5_fc *first,
171a8ffcc74SRabie Loulou 					      u32 last_id)
172a351a1b0SAmir Vadai {
1739aff93d7SVlad Buslov 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
1746f06e04bSGavi Teitz 	bool query_more_counters = (first->id <= last_id);
1756f06e04bSGavi Teitz 	int max_bulk_len = get_max_bulk_query_len(dev);
1766f06e04bSGavi Teitz 	u32 *data = fc_stats->bulk_query_out;
1776f06e04bSGavi Teitz 	struct mlx5_fc *counter = first;
1786f06e04bSGavi Teitz 	u32 bulk_base_id;
1796f06e04bSGavi Teitz 	int bulk_len;
180a351a1b0SAmir Vadai 	int err;
181a8ffcc74SRabie Loulou 
1826f06e04bSGavi Teitz 	while (query_more_counters) {
183a351a1b0SAmir Vadai 		/* first id must be aligned to 4 when using bulk query */
1846f06e04bSGavi Teitz 		bulk_base_id = counter->id & ~0x3;
185a351a1b0SAmir Vadai 
186a351a1b0SAmir Vadai 		/* number of counters to query inc. the last counter */
1876f06e04bSGavi Teitz 		bulk_len = min_t(int, max_bulk_len,
1886f06e04bSGavi Teitz 				 ALIGN(last_id - bulk_base_id + 1, 4));
189a351a1b0SAmir Vadai 
1906f06e04bSGavi Teitz 		err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
1916f06e04bSGavi Teitz 					     data);
192a351a1b0SAmir Vadai 		if (err) {
193a351a1b0SAmir Vadai 			mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
1946f06e04bSGavi Teitz 			return;
195a351a1b0SAmir Vadai 		}
1966f06e04bSGavi Teitz 		query_more_counters = false;
197a351a1b0SAmir Vadai 
1989aff93d7SVlad Buslov 		list_for_each_entry_from(counter, &fc_stats->counters, list) {
1996f06e04bSGavi Teitz 			int counter_index = counter->id - bulk_base_id;
2006f06e04bSGavi Teitz 			struct mlx5_fc_cache *cache = &counter->cache;
201a351a1b0SAmir Vadai 
2026f06e04bSGavi Teitz 			if (counter->id >= bulk_base_id + bulk_len) {
2036f06e04bSGavi Teitz 				query_more_counters = true;
204a351a1b0SAmir Vadai 				break;
2059aff93d7SVlad Buslov 			}
206a351a1b0SAmir Vadai 
2076f06e04bSGavi Teitz 			update_counter_cache(counter_index, data, cache);
208a351a1b0SAmir Vadai 		}
2096f06e04bSGavi Teitz 	}
210a351a1b0SAmir Vadai }
211a351a1b0SAmir Vadai 
212558101f1SGavi Teitz static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
21383033688SVlad Buslov {
21483033688SVlad Buslov 	mlx5_cmd_fc_free(dev, counter->id);
21583033688SVlad Buslov 	kfree(counter);
21683033688SVlad Buslov }
21783033688SVlad Buslov 
218558101f1SGavi Teitz static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
219558101f1SGavi Teitz {
220558101f1SGavi Teitz 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
221558101f1SGavi Teitz 
222558101f1SGavi Teitz 	if (counter->bulk)
223558101f1SGavi Teitz 		mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
224558101f1SGavi Teitz 	else
225558101f1SGavi Teitz 		mlx5_fc_free(dev, counter);
226558101f1SGavi Teitz }
227558101f1SGavi Teitz 
22843a335e0SAmir Vadai static void mlx5_fc_stats_work(struct work_struct *work)
22943a335e0SAmir Vadai {
23043a335e0SAmir Vadai 	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
23143a335e0SAmir Vadai 						 priv.fc_stats.work.work);
23243a335e0SAmir Vadai 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
233fd330713SVlad Buslov 	/* Take dellist first to ensure that counters cannot be deleted before
234fd330713SVlad Buslov 	 * they are inserted.
235fd330713SVlad Buslov 	 */
236fd330713SVlad Buslov 	struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
237fd330713SVlad Buslov 	struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
2386e5e2283SVlad Buslov 	struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
23943a335e0SAmir Vadai 	unsigned long now = jiffies;
24043a335e0SAmir Vadai 
241fd330713SVlad Buslov 	if (addlist || !list_empty(&fc_stats->counters))
242f6dfb4c3SHadar Hen Zion 		queue_delayed_work(fc_stats->wq, &fc_stats->work,
243f6dfb4c3SHadar Hen Zion 				   fc_stats->sampling_interval);
24443a335e0SAmir Vadai 
245fd330713SVlad Buslov 	llist_for_each_entry(counter, addlist, addlist)
2469aff93d7SVlad Buslov 		mlx5_fc_stats_insert(dev, counter);
24729cc6679SAmir Vadai 
248fd330713SVlad Buslov 	llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
2492a4c4298SVlad Buslov 		mlx5_fc_stats_remove(dev, counter);
25043a335e0SAmir Vadai 
251558101f1SGavi Teitz 		mlx5_fc_release(dev, counter);
25243a335e0SAmir Vadai 	}
25343a335e0SAmir Vadai 
2549aff93d7SVlad Buslov 	if (time_before(now, fc_stats->next_query) ||
2559aff93d7SVlad Buslov 	    list_empty(&fc_stats->counters))
256a351a1b0SAmir Vadai 		return;
2579aff93d7SVlad Buslov 	last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);
25843a335e0SAmir Vadai 
2599aff93d7SVlad Buslov 	counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
2609aff93d7SVlad Buslov 				   list);
2616f06e04bSGavi Teitz 	if (counter)
2626f06e04bSGavi Teitz 		mlx5_fc_stats_query_counter_range(dev, counter, last->id);
26343a335e0SAmir Vadai 
264f6dfb4c3SHadar Hen Zion 	fc_stats->next_query = now + fc_stats->sampling_interval;
26543a335e0SAmir Vadai }
26643a335e0SAmir Vadai 
267558101f1SGavi Teitz static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
26843a335e0SAmir Vadai {
26943a335e0SAmir Vadai 	struct mlx5_fc *counter;
27043a335e0SAmir Vadai 	int err;
27143a335e0SAmir Vadai 
27243a335e0SAmir Vadai 	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
27343a335e0SAmir Vadai 	if (!counter)
27443a335e0SAmir Vadai 		return ERR_PTR(-ENOMEM);
27543a335e0SAmir Vadai 
27643a335e0SAmir Vadai 	err = mlx5_cmd_fc_alloc(dev, &counter->id);
277558101f1SGavi Teitz 	if (err) {
278558101f1SGavi Teitz 		kfree(counter);
279558101f1SGavi Teitz 		return ERR_PTR(err);
280558101f1SGavi Teitz 	}
281558101f1SGavi Teitz 
282558101f1SGavi Teitz 	return counter;
283558101f1SGavi Teitz }
284558101f1SGavi Teitz 
285558101f1SGavi Teitz static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
286558101f1SGavi Teitz {
287558101f1SGavi Teitz 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
288558101f1SGavi Teitz 	struct mlx5_fc *counter;
289558101f1SGavi Teitz 
290558101f1SGavi Teitz 	if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
291558101f1SGavi Teitz 		counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
292558101f1SGavi Teitz 		if (!IS_ERR(counter))
293558101f1SGavi Teitz 			return counter;
294558101f1SGavi Teitz 	}
295558101f1SGavi Teitz 
296558101f1SGavi Teitz 	return mlx5_fc_single_alloc(dev);
297558101f1SGavi Teitz }
298558101f1SGavi Teitz 
299558101f1SGavi Teitz struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
300558101f1SGavi Teitz {
301558101f1SGavi Teitz 	struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
302558101f1SGavi Teitz 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
303558101f1SGavi Teitz 	int err;
304558101f1SGavi Teitz 
305558101f1SGavi Teitz 	if (IS_ERR(counter))
306558101f1SGavi Teitz 		return counter;
307558101f1SGavi Teitz 
308558101f1SGavi Teitz 	INIT_LIST_HEAD(&counter->list);
309558101f1SGavi Teitz 	counter->aging = aging;
31043a335e0SAmir Vadai 
31143a335e0SAmir Vadai 	if (aging) {
31212d6066cSVlad Buslov 		u32 id = counter->id;
31312d6066cSVlad Buslov 
314e83d6955SPaul Blakey 		counter->cache.lastuse = jiffies;
315558101f1SGavi Teitz 		counter->lastbytes = counter->cache.bytes;
316558101f1SGavi Teitz 		counter->lastpackets = counter->cache.packets;
31743a335e0SAmir Vadai 
31812d6066cSVlad Buslov 		idr_preload(GFP_KERNEL);
31912d6066cSVlad Buslov 		spin_lock(&fc_stats->counters_idr_lock);
32012d6066cSVlad Buslov 
32112d6066cSVlad Buslov 		err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
32212d6066cSVlad Buslov 				    GFP_NOWAIT);
32312d6066cSVlad Buslov 
32412d6066cSVlad Buslov 		spin_unlock(&fc_stats->counters_idr_lock);
32512d6066cSVlad Buslov 		idr_preload_end();
32612d6066cSVlad Buslov 		if (err)
32712d6066cSVlad Buslov 			goto err_out_alloc;
32812d6066cSVlad Buslov 
32983033688SVlad Buslov 		llist_add(&counter->addlist, &fc_stats->addlist);
33043a335e0SAmir Vadai 
33143a335e0SAmir Vadai 		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
33243a335e0SAmir Vadai 	}
33343a335e0SAmir Vadai 
33443a335e0SAmir Vadai 	return counter;
33543a335e0SAmir Vadai 
33612d6066cSVlad Buslov err_out_alloc:
337558101f1SGavi Teitz 	mlx5_fc_release(dev, counter);
33843a335e0SAmir Vadai 	return ERR_PTR(err);
33943a335e0SAmir Vadai }
3405f9bf63aSRaed Salem EXPORT_SYMBOL(mlx5_fc_create);
34143a335e0SAmir Vadai 
342171c7625SMark Bloch u32 mlx5_fc_id(struct mlx5_fc *counter)
343171c7625SMark Bloch {
344171c7625SMark Bloch 	return counter->id;
345171c7625SMark Bloch }
346171c7625SMark Bloch EXPORT_SYMBOL(mlx5_fc_id);
347171c7625SMark Bloch 
34843a335e0SAmir Vadai void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
34943a335e0SAmir Vadai {
35043a335e0SAmir Vadai 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
35143a335e0SAmir Vadai 
35243a335e0SAmir Vadai 	if (!counter)
35343a335e0SAmir Vadai 		return;
35443a335e0SAmir Vadai 
35543a335e0SAmir Vadai 	if (counter->aging) {
3566e5e2283SVlad Buslov 		llist_add(&counter->dellist, &fc_stats->dellist);
35743a335e0SAmir Vadai 		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
35843a335e0SAmir Vadai 		return;
35943a335e0SAmir Vadai 	}
36043a335e0SAmir Vadai 
361558101f1SGavi Teitz 	mlx5_fc_release(dev, counter);
36243a335e0SAmir Vadai }
3635f9bf63aSRaed Salem EXPORT_SYMBOL(mlx5_fc_destroy);
36443a335e0SAmir Vadai 
36543a335e0SAmir Vadai int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
36643a335e0SAmir Vadai {
36743a335e0SAmir Vadai 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
3686f06e04bSGavi Teitz 	int max_bulk_len;
3696f06e04bSGavi Teitz 	int max_out_len;
37043a335e0SAmir Vadai 
37112d6066cSVlad Buslov 	spin_lock_init(&fc_stats->counters_idr_lock);
37212d6066cSVlad Buslov 	idr_init(&fc_stats->counters_idr);
3739aff93d7SVlad Buslov 	INIT_LIST_HEAD(&fc_stats->counters);
37483033688SVlad Buslov 	init_llist_head(&fc_stats->addlist);
3756e5e2283SVlad Buslov 	init_llist_head(&fc_stats->dellist);
37643a335e0SAmir Vadai 
3776f06e04bSGavi Teitz 	max_bulk_len = get_max_bulk_query_len(dev);
3786f06e04bSGavi Teitz 	max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
3796f06e04bSGavi Teitz 	fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
3806f06e04bSGavi Teitz 	if (!fc_stats->bulk_query_out)
3816f06e04bSGavi Teitz 		return -ENOMEM;
3826f06e04bSGavi Teitz 
38343a335e0SAmir Vadai 	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
38443a335e0SAmir Vadai 	if (!fc_stats->wq)
3856f06e04bSGavi Teitz 		goto err_wq_create;
38643a335e0SAmir Vadai 
387f6dfb4c3SHadar Hen Zion 	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
38843a335e0SAmir Vadai 	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
38943a335e0SAmir Vadai 
390558101f1SGavi Teitz 	mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
39143a335e0SAmir Vadai 	return 0;
3926f06e04bSGavi Teitz 
3936f06e04bSGavi Teitz err_wq_create:
3946f06e04bSGavi Teitz 	kfree(fc_stats->bulk_query_out);
3956f06e04bSGavi Teitz 	return -ENOMEM;
39643a335e0SAmir Vadai }
39743a335e0SAmir Vadai 
39843a335e0SAmir Vadai void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
39943a335e0SAmir Vadai {
40043a335e0SAmir Vadai 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
40183033688SVlad Buslov 	struct llist_node *tmplist;
40243a335e0SAmir Vadai 	struct mlx5_fc *counter;
40343a335e0SAmir Vadai 	struct mlx5_fc *tmp;
40443a335e0SAmir Vadai 
40543a335e0SAmir Vadai 	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
40643a335e0SAmir Vadai 	destroy_workqueue(dev->priv.fc_stats.wq);
40743a335e0SAmir Vadai 	dev->priv.fc_stats.wq = NULL;
40843a335e0SAmir Vadai 
40983033688SVlad Buslov 	tmplist = llist_del_all(&fc_stats->addlist);
41083033688SVlad Buslov 	llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
411558101f1SGavi Teitz 		mlx5_fc_release(dev, counter);
41229cc6679SAmir Vadai 
4139aff93d7SVlad Buslov 	list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
414558101f1SGavi Teitz 		mlx5_fc_release(dev, counter);
415b1b9f97aSGavi Teitz 
416b1b9f97aSGavi Teitz 	mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
417b1b9f97aSGavi Teitz 	idr_destroy(&fc_stats->counters_idr);
418b1b9f97aSGavi Teitz 	kfree(fc_stats->bulk_query_out);
41929cc6679SAmir Vadai }
42043a335e0SAmir Vadai 
421930821e3SOr Gerlitz int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
422b8a0dbe3SEugenia Emantayev 		  u64 *packets, u64 *bytes)
423b8a0dbe3SEugenia Emantayev {
424930821e3SOr Gerlitz 	return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
425b8a0dbe3SEugenia Emantayev }
4265f9bf63aSRaed Salem EXPORT_SYMBOL(mlx5_fc_query);
427b8a0dbe3SEugenia Emantayev 
42890bb7692SAriel Levkovich u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
42990bb7692SAriel Levkovich {
43090bb7692SAriel Levkovich 	return counter->cache.lastuse;
43190bb7692SAriel Levkovich }
43290bb7692SAriel Levkovich 
43343a335e0SAmir Vadai void mlx5_fc_query_cached(struct mlx5_fc *counter,
43443a335e0SAmir Vadai 			  u64 *bytes, u64 *packets, u64 *lastuse)
43543a335e0SAmir Vadai {
43643a335e0SAmir Vadai 	struct mlx5_fc_cache c;
43743a335e0SAmir Vadai 
43843a335e0SAmir Vadai 	c = counter->cache;
43943a335e0SAmir Vadai 
44043a335e0SAmir Vadai 	*bytes = c.bytes - counter->lastbytes;
44143a335e0SAmir Vadai 	*packets = c.packets - counter->lastpackets;
44243a335e0SAmir Vadai 	*lastuse = c.lastuse;
44343a335e0SAmir Vadai 
44443a335e0SAmir Vadai 	counter->lastbytes = c.bytes;
44543a335e0SAmir Vadai 	counter->lastpackets = c.packets;
44643a335e0SAmir Vadai }
447f6dfb4c3SHadar Hen Zion 
448f6dfb4c3SHadar Hen Zion void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
449f6dfb4c3SHadar Hen Zion 			      struct delayed_work *dwork,
450f6dfb4c3SHadar Hen Zion 			      unsigned long delay)
451f6dfb4c3SHadar Hen Zion {
452f6dfb4c3SHadar Hen Zion 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
453f6dfb4c3SHadar Hen Zion 
454f6dfb4c3SHadar Hen Zion 	queue_delayed_work(fc_stats->wq, dwork, delay);
455f6dfb4c3SHadar Hen Zion }
456f6dfb4c3SHadar Hen Zion 
457f6dfb4c3SHadar Hen Zion void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
458f6dfb4c3SHadar Hen Zion 				      unsigned long interval)
459f6dfb4c3SHadar Hen Zion {
460f6dfb4c3SHadar Hen Zion 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
461f6dfb4c3SHadar Hen Zion 
462f6dfb4c3SHadar Hen Zion 	fc_stats->sampling_interval = min_t(unsigned long, interval,
463f6dfb4c3SHadar Hen Zion 					    fc_stats->sampling_interval);
464f6dfb4c3SHadar Hen Zion }
4655d8a0253SGavi Teitz 
/* Flow counter bulks */
4675d8a0253SGavi Teitz 
4685d8a0253SGavi Teitz struct mlx5_fc_bulk {
469558101f1SGavi Teitz 	struct list_head pool_list;
4705d8a0253SGavi Teitz 	u32 base_id;
4715d8a0253SGavi Teitz 	int bulk_len;
4725d8a0253SGavi Teitz 	unsigned long *bitmask;
473339ffae5SGustavo A. R. Silva 	struct mlx5_fc fcs[];
4745d8a0253SGavi Teitz };
4755d8a0253SGavi Teitz 
476558101f1SGavi Teitz static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
477558101f1SGavi Teitz 			 u32 id)
4785d8a0253SGavi Teitz {
4795d8a0253SGavi Teitz 	counter->bulk = bulk;
4805d8a0253SGavi Teitz 	counter->id = id;
4815d8a0253SGavi Teitz }
4825d8a0253SGavi Teitz 
4835d8a0253SGavi Teitz static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
4845d8a0253SGavi Teitz {
4855d8a0253SGavi Teitz 	return bitmap_weight(bulk->bitmask, bulk->bulk_len);
4865d8a0253SGavi Teitz }
4875d8a0253SGavi Teitz 
488558101f1SGavi Teitz static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
4895d8a0253SGavi Teitz {
4905d8a0253SGavi Teitz 	enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
4915d8a0253SGavi Teitz 	struct mlx5_fc_bulk *bulk;
4925d8a0253SGavi Teitz 	int err = -ENOMEM;
4935d8a0253SGavi Teitz 	int bulk_len;
4945d8a0253SGavi Teitz 	u32 base_id;
4955d8a0253SGavi Teitz 	int i;
4965d8a0253SGavi Teitz 
4975d8a0253SGavi Teitz 	alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
4985d8a0253SGavi Teitz 	bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
4995d8a0253SGavi Teitz 
500*5cec6de0SMaor Dickman 	bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
5015d8a0253SGavi Teitz 			GFP_KERNEL);
5025d8a0253SGavi Teitz 	if (!bulk)
5035d8a0253SGavi Teitz 		goto err_alloc_bulk;
5045d8a0253SGavi Teitz 
505*5cec6de0SMaor Dickman 	bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
5065d8a0253SGavi Teitz 				 GFP_KERNEL);
5075d8a0253SGavi Teitz 	if (!bulk->bitmask)
5085d8a0253SGavi Teitz 		goto err_alloc_bitmask;
5095d8a0253SGavi Teitz 
5105d8a0253SGavi Teitz 	err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
5115d8a0253SGavi Teitz 	if (err)
5125d8a0253SGavi Teitz 		goto err_mlx5_cmd_bulk_alloc;
5135d8a0253SGavi Teitz 
5145d8a0253SGavi Teitz 	bulk->base_id = base_id;
5155d8a0253SGavi Teitz 	bulk->bulk_len = bulk_len;
5165d8a0253SGavi Teitz 	for (i = 0; i < bulk_len; i++) {
5175d8a0253SGavi Teitz 		mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
5185d8a0253SGavi Teitz 		set_bit(i, bulk->bitmask);
5195d8a0253SGavi Teitz 	}
5205d8a0253SGavi Teitz 
5215d8a0253SGavi Teitz 	return bulk;
5225d8a0253SGavi Teitz 
5235d8a0253SGavi Teitz err_mlx5_cmd_bulk_alloc:
524*5cec6de0SMaor Dickman 	kvfree(bulk->bitmask);
5255d8a0253SGavi Teitz err_alloc_bitmask:
526*5cec6de0SMaor Dickman 	kvfree(bulk);
5275d8a0253SGavi Teitz err_alloc_bulk:
5285d8a0253SGavi Teitz 	return ERR_PTR(err);
5295d8a0253SGavi Teitz }
5305d8a0253SGavi Teitz 
531558101f1SGavi Teitz static int
5325d8a0253SGavi Teitz mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
5335d8a0253SGavi Teitz {
5345d8a0253SGavi Teitz 	if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
5355d8a0253SGavi Teitz 		mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
5365d8a0253SGavi Teitz 		return -EBUSY;
5375d8a0253SGavi Teitz 	}
5385d8a0253SGavi Teitz 
5395d8a0253SGavi Teitz 	mlx5_cmd_fc_free(dev, bulk->base_id);
540*5cec6de0SMaor Dickman 	kvfree(bulk->bitmask);
541*5cec6de0SMaor Dickman 	kvfree(bulk);
5425d8a0253SGavi Teitz 
5435d8a0253SGavi Teitz 	return 0;
5445d8a0253SGavi Teitz }
5455d8a0253SGavi Teitz 
546558101f1SGavi Teitz static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
5475d8a0253SGavi Teitz {
5485d8a0253SGavi Teitz 	int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
5495d8a0253SGavi Teitz 
5505d8a0253SGavi Teitz 	if (free_fc_index >= bulk->bulk_len)
5515d8a0253SGavi Teitz 		return ERR_PTR(-ENOSPC);
5525d8a0253SGavi Teitz 
5535d8a0253SGavi Teitz 	clear_bit(free_fc_index, bulk->bitmask);
5545d8a0253SGavi Teitz 	return &bulk->fcs[free_fc_index];
5555d8a0253SGavi Teitz }
5565d8a0253SGavi Teitz 
557558101f1SGavi Teitz static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
5585d8a0253SGavi Teitz {
5595d8a0253SGavi Teitz 	int fc_index = fc->id - bulk->base_id;
5605d8a0253SGavi Teitz 
5615d8a0253SGavi Teitz 	if (test_bit(fc_index, bulk->bitmask))
5625d8a0253SGavi Teitz 		return -EINVAL;
5635d8a0253SGavi Teitz 
5645d8a0253SGavi Teitz 	set_bit(fc_index, bulk->bitmask);
5655d8a0253SGavi Teitz 	return 0;
5665d8a0253SGavi Teitz }
567558101f1SGavi Teitz 
568558101f1SGavi Teitz /* Flow counters pool API */
569558101f1SGavi Teitz 
570558101f1SGavi Teitz static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
571558101f1SGavi Teitz {
572558101f1SGavi Teitz 	fc_pool->dev = dev;
573558101f1SGavi Teitz 	mutex_init(&fc_pool->pool_lock);
574558101f1SGavi Teitz 	INIT_LIST_HEAD(&fc_pool->fully_used);
575558101f1SGavi Teitz 	INIT_LIST_HEAD(&fc_pool->partially_used);
576558101f1SGavi Teitz 	INIT_LIST_HEAD(&fc_pool->unused);
577558101f1SGavi Teitz 	fc_pool->available_fcs = 0;
578558101f1SGavi Teitz 	fc_pool->used_fcs = 0;
579558101f1SGavi Teitz 	fc_pool->threshold = 0;
580558101f1SGavi Teitz }
581558101f1SGavi Teitz 
582558101f1SGavi Teitz static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
583558101f1SGavi Teitz {
584558101f1SGavi Teitz 	struct mlx5_core_dev *dev = fc_pool->dev;
585558101f1SGavi Teitz 	struct mlx5_fc_bulk *bulk;
586558101f1SGavi Teitz 	struct mlx5_fc_bulk *tmp;
587558101f1SGavi Teitz 
588558101f1SGavi Teitz 	list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
589558101f1SGavi Teitz 		mlx5_fc_bulk_destroy(dev, bulk);
590558101f1SGavi Teitz 	list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
591558101f1SGavi Teitz 		mlx5_fc_bulk_destroy(dev, bulk);
592558101f1SGavi Teitz 	list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
593558101f1SGavi Teitz 		mlx5_fc_bulk_destroy(dev, bulk);
594558101f1SGavi Teitz }
595558101f1SGavi Teitz 
596558101f1SGavi Teitz static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
597558101f1SGavi Teitz {
598558101f1SGavi Teitz 	fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
599558101f1SGavi Teitz 				   fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
600558101f1SGavi Teitz }
601558101f1SGavi Teitz 
602558101f1SGavi Teitz static struct mlx5_fc_bulk *
603558101f1SGavi Teitz mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
604558101f1SGavi Teitz {
605558101f1SGavi Teitz 	struct mlx5_core_dev *dev = fc_pool->dev;
606558101f1SGavi Teitz 	struct mlx5_fc_bulk *new_bulk;
607558101f1SGavi Teitz 
608558101f1SGavi Teitz 	new_bulk = mlx5_fc_bulk_create(dev);
609558101f1SGavi Teitz 	if (!IS_ERR(new_bulk))
610558101f1SGavi Teitz 		fc_pool->available_fcs += new_bulk->bulk_len;
611558101f1SGavi Teitz 	mlx5_fc_pool_update_threshold(fc_pool);
612558101f1SGavi Teitz 	return new_bulk;
613558101f1SGavi Teitz }
614558101f1SGavi Teitz 
615558101f1SGavi Teitz static void
616558101f1SGavi Teitz mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
617558101f1SGavi Teitz {
618558101f1SGavi Teitz 	struct mlx5_core_dev *dev = fc_pool->dev;
619558101f1SGavi Teitz 
620558101f1SGavi Teitz 	fc_pool->available_fcs -= bulk->bulk_len;
621558101f1SGavi Teitz 	mlx5_fc_bulk_destroy(dev, bulk);
622558101f1SGavi Teitz 	mlx5_fc_pool_update_threshold(fc_pool);
623558101f1SGavi Teitz }
624558101f1SGavi Teitz 
625558101f1SGavi Teitz static struct mlx5_fc *
626558101f1SGavi Teitz mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
627558101f1SGavi Teitz 			       struct list_head *next_list,
628558101f1SGavi Teitz 			       bool move_non_full_bulk)
629558101f1SGavi Teitz {
630558101f1SGavi Teitz 	struct mlx5_fc_bulk *bulk;
631558101f1SGavi Teitz 	struct mlx5_fc *fc;
632558101f1SGavi Teitz 
633558101f1SGavi Teitz 	if (list_empty(src_list))
634558101f1SGavi Teitz 		return ERR_PTR(-ENODATA);
635558101f1SGavi Teitz 
636558101f1SGavi Teitz 	bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
637558101f1SGavi Teitz 	fc = mlx5_fc_bulk_acquire_fc(bulk);
638558101f1SGavi Teitz 	if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
639558101f1SGavi Teitz 		list_move(&bulk->pool_list, next_list);
640558101f1SGavi Teitz 	return fc;
641558101f1SGavi Teitz }
642558101f1SGavi Teitz 
643558101f1SGavi Teitz static struct mlx5_fc *
644558101f1SGavi Teitz mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
645558101f1SGavi Teitz {
646558101f1SGavi Teitz 	struct mlx5_fc_bulk *new_bulk;
647558101f1SGavi Teitz 	struct mlx5_fc *fc;
648558101f1SGavi Teitz 
649558101f1SGavi Teitz 	mutex_lock(&fc_pool->pool_lock);
650558101f1SGavi Teitz 
651558101f1SGavi Teitz 	fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
652558101f1SGavi Teitz 					    &fc_pool->fully_used, false);
653558101f1SGavi Teitz 	if (IS_ERR(fc))
654558101f1SGavi Teitz 		fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
655558101f1SGavi Teitz 						    &fc_pool->partially_used,
656558101f1SGavi Teitz 						    true);
657558101f1SGavi Teitz 	if (IS_ERR(fc)) {
658558101f1SGavi Teitz 		new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
659558101f1SGavi Teitz 		if (IS_ERR(new_bulk)) {
660558101f1SGavi Teitz 			fc = ERR_CAST(new_bulk);
661558101f1SGavi Teitz 			goto out;
662558101f1SGavi Teitz 		}
663558101f1SGavi Teitz 		fc = mlx5_fc_bulk_acquire_fc(new_bulk);
664558101f1SGavi Teitz 		list_add(&new_bulk->pool_list, &fc_pool->partially_used);
665558101f1SGavi Teitz 	}
666558101f1SGavi Teitz 	fc_pool->available_fcs--;
667558101f1SGavi Teitz 	fc_pool->used_fcs++;
668558101f1SGavi Teitz 
669558101f1SGavi Teitz out:
670558101f1SGavi Teitz 	mutex_unlock(&fc_pool->pool_lock);
671558101f1SGavi Teitz 	return fc;
672558101f1SGavi Teitz }
673558101f1SGavi Teitz 
674558101f1SGavi Teitz static void
675558101f1SGavi Teitz mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
676558101f1SGavi Teitz {
677558101f1SGavi Teitz 	struct mlx5_core_dev *dev = fc_pool->dev;
678558101f1SGavi Teitz 	struct mlx5_fc_bulk *bulk = fc->bulk;
679558101f1SGavi Teitz 	int bulk_free_fcs_amount;
680558101f1SGavi Teitz 
681558101f1SGavi Teitz 	mutex_lock(&fc_pool->pool_lock);
682558101f1SGavi Teitz 
683558101f1SGavi Teitz 	if (mlx5_fc_bulk_release_fc(bulk, fc)) {
684558101f1SGavi Teitz 		mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
685558101f1SGavi Teitz 		goto unlock;
686558101f1SGavi Teitz 	}
687558101f1SGavi Teitz 
688558101f1SGavi Teitz 	fc_pool->available_fcs++;
689558101f1SGavi Teitz 	fc_pool->used_fcs--;
690558101f1SGavi Teitz 
691558101f1SGavi Teitz 	bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
692558101f1SGavi Teitz 	if (bulk_free_fcs_amount == 1)
693558101f1SGavi Teitz 		list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
694558101f1SGavi Teitz 	if (bulk_free_fcs_amount == bulk->bulk_len) {
695558101f1SGavi Teitz 		list_del(&bulk->pool_list);
696558101f1SGavi Teitz 		if (fc_pool->available_fcs > fc_pool->threshold)
697558101f1SGavi Teitz 			mlx5_fc_pool_free_bulk(fc_pool, bulk);
698558101f1SGavi Teitz 		else
699558101f1SGavi Teitz 			list_add(&bulk->pool_list, &fc_pool->unused);
700558101f1SGavi Teitz 	}
701558101f1SGavi Teitz 
702558101f1SGavi Teitz unlock:
703558101f1SGavi Teitz 	mutex_unlock(&fc_pool->pool_lock);
704558101f1SGavi Teitz }
705