/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10

struct mlx5_fc_cache {
	u64 packets;
	u64 bytes;
	u64 lastuse;
};

struct mlx5_fc {
	struct list_head list;
	struct llist_node addlist;
	struct llist_node dellist;

	/* last{packets,bytes} members are used when calculating the delta since
	 * last reading
	 */
	u64 lastpackets;
	u64 lastbytes;

	struct mlx5_fc_bulk *bulk;
	u32 id;
	bool aging;

	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};

static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);

/* locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls to, or
 * bad ordering of, mlx5_fc_create(), mlx5_fc_destroy() and accesses to a
 * struct mlx5_fc reference.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
 *
 * access to counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be consumed by
 *     mlx5_fc_stats_work(). addlist is a lockless singly linked list
 *     that doesn't require any additional synchronization when adding a
 *     single node.
 *   - spawn thread to do the actual insertion
 *
 * - destroy (user context)
 *   - add a counter to the lockless dellist
 *   - spawn thread to do the actual del
 *
 * - dump (user context)
 *   user should not call dump after destroy
 *
 * - query (single thread workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (since the update
 *                is not atomic)
 *   query/create - no conflict (see create)
 *   since every create/destroy spawns the work, the thread will only query
 *   the hardware after the necessary time has elapsed.
 */
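
/* Typical counter lifetime, as seen from a user such as en_tc.c (a rough
 * sketch of the API exported by this file; error handling and the steering
 * rule that actually references the counter are elided):
 *
 *	struct mlx5_fc *fc;
 *	u64 bytes, packets, lastuse;
 *
 *	fc = mlx5_fc_create(dev, true);		// aging counter
 *	if (IS_ERR(fc))
 *		return PTR_ERR(fc);
 *	// ... attach mlx5_fc_id(fc) to a flow rule ...
 *	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
 *	mlx5_fc_destroy(dev, fc);
 */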

static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
						      u32 id)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	unsigned long next_id = (unsigned long)id + 1;
	struct mlx5_fc *counter;
	unsigned long tmp;

	rcu_read_lock();
	/* skip counters that are in idr, but not yet in counters list */
	idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
				       counter, tmp, next_id) {
		if (!list_empty(&counter->list))
			break;
	}
	rcu_read_unlock();

	return counter ? &counter->list : &fc_stats->counters;
}

static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
				 struct mlx5_fc *counter)
{
	struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);

	list_add_tail(&counter->list, next);
}

static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
				 struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	list_del(&counter->list);

	spin_lock(&fc_stats->counters_idr_lock);
	WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
	spin_unlock(&fc_stats->counters_idr_lock);
}

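/* The bulk query buffer starts small (MLX5_INIT_COUNTERS_BULK) and is grown
 * lazily by mlx5_fc_stats_bulk_query_size_increase() once the number of live
 * counters exceeds the initial length; both lengths are capped by the
 * device's log_max_flow_counter_bulk capability.
 */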
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
	return min_t(int, MLX5_INIT_COUNTERS_BULK,
		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}

static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
	return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}

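/* Update the cached values of a counter from one slot of a bulk query reply.
 * lastuse is only refreshed when the packet count has changed, so it tracks
 * the last time traffic was actually seen rather than the last query time.
 */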
static void update_counter_cache(int index, u32 *bulk_raw_data,
				 struct mlx5_fc_cache *cache)
{
	void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
			     flow_statistics[index]);
	u64 packets = MLX5_GET64(traffic_counter, stats, packets);
	u64 bytes = MLX5_GET64(traffic_counter, stats, octets);

	if (cache->packets == packets)
		return;

	cache->packets = packets;
	cache->bytes = bytes;
	cache->lastuse = jiffies;
}

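/* Query all counters in [first->id, last_id] in bulks and refresh their
 * caches. Firmware requires the first id of a bulk query to be aligned to 4,
 * so e.g. a range starting at id 10 is queried from bulk_base_id 8, and the
 * reply slots for ids 8-9 are never consumed by the list walk below.
 */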
static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
					      struct mlx5_fc *first,
					      u32 last_id)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	bool query_more_counters = (first->id <= last_id);
	int cur_bulk_len = fc_stats->bulk_query_len;
	u32 *data = fc_stats->bulk_query_out;
	struct mlx5_fc *counter = first;
	u32 bulk_base_id;
	int bulk_len;
	int err;

	while (query_more_counters) {
		/* first id must be aligned to 4 when using bulk query */
		bulk_base_id = counter->id & ~0x3;

		/* number of counters to query, including the last counter */
		bulk_len = min_t(int, cur_bulk_len,
				 ALIGN(last_id - bulk_base_id + 1, 4));

		err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
					     data);
		if (err) {
			mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
			return;
		}
		query_more_counters = false;

		list_for_each_entry_from(counter, &fc_stats->counters, list) {
			int counter_index = counter->id - bulk_base_id;
			struct mlx5_fc_cache *cache = &counter->cache;

			if (counter->id >= bulk_base_id + bulk_len) {
				query_more_counters = true;
				break;
			}

			update_counter_cache(counter_index, data, cache);
		}
	}
}

static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}

static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (counter->bulk)
		mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
	else
		mlx5_fc_free(dev, counter);
}

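/* Try to replace the bulk query buffer with one sized for the device's
 * maximum bulk length. If the allocation fails, back off and do not retry
 * until MLX5_FC_BULK_QUERY_ALLOC_PERIOD has passed, to avoid hammering a
 * memory-constrained host on every sampling interval.
 */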
static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	int max_bulk_len = get_max_bulk_query_len(dev);
	unsigned long now = jiffies;
	u32 *bulk_query_out_tmp;
	int max_out_len;

	if (fc_stats->bulk_query_alloc_failed &&
	    time_before(now, fc_stats->next_bulk_query_alloc))
		return;

	max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
	bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL);
	if (!bulk_query_out_tmp) {
		mlx5_core_warn_once(dev,
				    "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
				    max_bulk_len);
		fc_stats->bulk_query_alloc_failed = true;
		fc_stats->next_bulk_query_alloc =
			now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD;
		return;
	}

	kfree(fc_stats->bulk_query_out);
	fc_stats->bulk_query_out = bulk_query_out_tmp;
	fc_stats->bulk_query_len = max_bulk_len;
	if (fc_stats->bulk_query_alloc_failed) {
		mlx5_core_info(dev,
			       "Flow counters bulk query buffer size increased, bulk_size(%d)\n",
			       max_bulk_len);
		fc_stats->bulk_query_alloc_failed = false;
	}
}

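/* Periodic work: drain the add/del llists into the id-sorted counters list,
 * opportunistically grow the bulk query buffer, and, at most once per
 * sampling interval, query the hardware for the whole id range currently in
 * use.
 */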
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	/* Take dellist first to ensure that counters cannot be deleted before
	 * they are inserted.
	 */
	struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
	struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
	struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
	unsigned long now = jiffies;

	if (addlist || !list_empty(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   fc_stats->sampling_interval);

	llist_for_each_entry(counter, addlist, addlist) {
		mlx5_fc_stats_insert(dev, counter);
		fc_stats->num_counters++;
	}

	llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
		mlx5_fc_stats_remove(dev, counter);

		mlx5_fc_release(dev, counter);
		fc_stats->num_counters--;
	}

	if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
	    fc_stats->num_counters > get_init_bulk_query_len(dev))
		mlx5_fc_stats_bulk_query_size_increase(dev);

	if (time_before(now, fc_stats->next_query) ||
	    list_empty(&fc_stats->counters))
		return;
	last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);

	counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
				   list);
	if (counter)
		mlx5_fc_stats_query_counter_range(dev, counter, last->id);

	fc_stats->next_query = now + fc_stats->sampling_interval;
}

static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err) {
		kfree(counter);
		return ERR_PTR(err);
	}

	return counter;
}

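/* Prefer handing out a counter from the shared pool (carved out of bulk
 * allocations) when the device supports bulk allocation and the counter is
 * subject to aging; otherwise fall back to allocating a single counter.
 */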
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;

	if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
		counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
		if (!IS_ERR(counter))
			return counter;
	}

	return mlx5_fc_single_alloc(dev);
}

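/* Create a counter without scheduling the stats work. For aging counters,
 * the id is also inserted into counters_idr so that the work can later
 * splice the counter into the id-sorted counters list; see
 * mlx5_fc_counters_lookup_next().
 */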
struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	int err;

	if (IS_ERR(counter))
		return counter;

	INIT_LIST_HEAD(&counter->list);
	counter->aging = aging;

	if (aging) {
		u32 id = counter->id;

		counter->cache.lastuse = jiffies;
		counter->lastbytes = counter->cache.bytes;
		counter->lastpackets = counter->cache.packets;

		idr_preload(GFP_KERNEL);
		spin_lock(&fc_stats->counters_idr_lock);

		err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
				    GFP_NOWAIT);

		spin_unlock(&fc_stats->counters_idr_lock);
		idr_preload_end();
		if (err)
			goto err_out_alloc;

		llist_add(&counter->addlist, &fc_stats->addlist);
	}

	return counter;

err_out_alloc:
	mlx5_fc_release(dev, counter);
	return ERR_PTR(err);
}

struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc *counter = mlx5_fc_create_ex(dev, aging);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (aging)
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	return counter;
}
EXPORT_SYMBOL(mlx5_fc_create);

u32 mlx5_fc_id(struct mlx5_fc *counter)
{
	return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);

void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		llist_add(&counter->dellist, &fc_stats->dellist);
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);

int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	int init_bulk_len;
	int init_out_len;

	spin_lock_init(&fc_stats->counters_idr_lock);
	idr_init(&fc_stats->counters_idr);
	INIT_LIST_HEAD(&fc_stats->counters);
	init_llist_head(&fc_stats->addlist);
	init_llist_head(&fc_stats->dellist);

	init_bulk_len = get_init_bulk_query_len(dev);
	init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len);
	fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL);
	if (!fc_stats->bulk_query_out)
		return -ENOMEM;
	fc_stats->bulk_query_len = init_bulk_len;

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		goto err_wq_create;

	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
	return 0;

err_wq_create:
	kfree(fc_stats->bulk_query_out);
	return -ENOMEM;
}

void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct llist_node *tmplist;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	tmplist = llist_del_all(&fc_stats->addlist);
	llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
		mlx5_fc_release(dev, counter);

	list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
		mlx5_fc_release(dev, counter);

	mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
	idr_destroy(&fc_stats->counters_idr);
	kfree(fc_stats->bulk_query_out);
}

int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes)
{
	return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);

u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
{
	return counter->cache.lastuse;
}

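/* Report the bytes/packets accumulated since the previous call, based on the
 * values cached by the stats work (no device round trip). Note that this
 * advances last{bytes,packets}, so each delta is returned only once.
 */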
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}

void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
			      u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c = counter->cache;

	*bytes = c.bytes;
	*packets = c.packets;
	*lastuse = c.lastuse;
}

void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
			      struct delayed_work *dwork,
			      unsigned long delay)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	queue_delayed_work(fc_stats->wq, dwork, delay);
}

void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
				      unsigned long interval)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->sampling_interval = min_t(unsigned long, interval,
					    fc_stats->sampling_interval);
}

/* Flow counter bulks */

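/* A bulk of flow counters allocated by a single firmware command. A set bit
 * in bitmask means the counter at that index is free.
 */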
struct mlx5_fc_bulk {
	struct list_head pool_list;
	u32 base_id;
	int bulk_len;
	unsigned long *bitmask;
	struct mlx5_fc fcs[];
};

static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
			 u32 id)
{
	counter->bulk = bulk;
	counter->id = id;
}

static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
{
	return bitmap_weight(bulk->bitmask, bulk->bulk_len);
}

static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
{
	enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
	struct mlx5_fc_bulk *bulk;
	int err = -ENOMEM;
	int bulk_len;
	u32 base_id;
	int i;

	alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
	bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;

	bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
	if (!bulk)
		goto err_alloc_bulk;

	bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
				 GFP_KERNEL);
	if (!bulk->bitmask)
		goto err_alloc_bitmask;

	err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
	if (err)
		goto err_mlx5_cmd_bulk_alloc;

	bulk->base_id = base_id;
	bulk->bulk_len = bulk_len;
	for (i = 0; i < bulk_len; i++) {
		mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
		set_bit(i, bulk->bitmask);
	}

	return bulk;

err_mlx5_cmd_bulk_alloc:
	kvfree(bulk->bitmask);
err_alloc_bitmask:
	kvfree(bulk);
err_alloc_bulk:
	return ERR_PTR(err);
}

static int
mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
{
	if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
		mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
		return -EBUSY;
	}

	mlx5_cmd_fc_free(dev, bulk->base_id);
	kvfree(bulk->bitmask);
	kvfree(bulk);

	return 0;
}

static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
{
	int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);

	if (free_fc_index >= bulk->bulk_len)
		return ERR_PTR(-ENOSPC);

	clear_bit(free_fc_index, bulk->bitmask);
	return &bulk->fcs[free_fc_index];
}

static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
{
	int fc_index = fc->id - bulk->base_id;

	if (test_bit(fc_index, bulk->bitmask))
		return -EINVAL;

	set_bit(fc_index, bulk->bitmask);
	return 0;
}

/* Flow counters pool API */

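/* The pool hands out counters from bulk allocations and keeps its bulks on
 * one of three lists: fully_used (no free counters), partially_used, and
 * unused (all counters free). Fully unused bulks are freed back to the
 * device once the number of available counters exceeds a threshold sized
 * relative to the number of counters in use.
 */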
static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
{
	fc_pool->dev = dev;
	mutex_init(&fc_pool->pool_lock);
	INIT_LIST_HEAD(&fc_pool->fully_used);
	INIT_LIST_HEAD(&fc_pool->partially_used);
	INIT_LIST_HEAD(&fc_pool->unused);
	fc_pool->available_fcs = 0;
	fc_pool->used_fcs = 0;
	fc_pool->threshold = 0;
}

static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
{
	struct mlx5_core_dev *dev = fc_pool->dev;
	struct mlx5_fc_bulk *bulk;
	struct mlx5_fc_bulk *tmp;

	list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
		mlx5_fc_bulk_destroy(dev, bulk);
	list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
		mlx5_fc_bulk_destroy(dev, bulk);
	list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
		mlx5_fc_bulk_destroy(dev, bulk);
}

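/* Keep roughly a tenth of the in-use counter count (capped at
 * MLX5_FC_POOL_MAX_THRESHOLD) as a reserve of ready-to-hand-out counters
 * before fully unused bulks are returned to the device.
 */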
static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
{
	fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
				   fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
}

static struct mlx5_fc_bulk *
mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
{
	struct mlx5_core_dev *dev = fc_pool->dev;
	struct mlx5_fc_bulk *new_bulk;

	new_bulk = mlx5_fc_bulk_create(dev);
	if (!IS_ERR(new_bulk))
		fc_pool->available_fcs += new_bulk->bulk_len;
	mlx5_fc_pool_update_threshold(fc_pool);
	return new_bulk;
}

static void
mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
{
	struct mlx5_core_dev *dev = fc_pool->dev;

	fc_pool->available_fcs -= bulk->bulk_len;
	mlx5_fc_bulk_destroy(dev, bulk);
	mlx5_fc_pool_update_threshold(fc_pool);
}

static struct mlx5_fc *
mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
			       struct list_head *next_list,
			       bool move_non_full_bulk)
{
	struct mlx5_fc_bulk *bulk;
	struct mlx5_fc *fc;

	if (list_empty(src_list))
		return ERR_PTR(-ENODATA);

	bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
	fc = mlx5_fc_bulk_acquire_fc(bulk);
	if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
		list_move(&bulk->pool_list, next_list);
	return fc;
}

static struct mlx5_fc *
mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
{
	struct mlx5_fc_bulk *new_bulk;
	struct mlx5_fc *fc;

	mutex_lock(&fc_pool->pool_lock);

	fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
					    &fc_pool->fully_used, false);
	if (IS_ERR(fc))
		fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
						    &fc_pool->partially_used,
						    true);
	if (IS_ERR(fc)) {
		new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
		if (IS_ERR(new_bulk)) {
			fc = ERR_CAST(new_bulk);
			goto out;
		}
		fc = mlx5_fc_bulk_acquire_fc(new_bulk);
		list_add(&new_bulk->pool_list, &fc_pool->partially_used);
	}
	fc_pool->available_fcs--;
	fc_pool->used_fcs++;

out:
	mutex_unlock(&fc_pool->pool_lock);
	return fc;
}

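/* Return a counter to its bulk and rebalance the pool lists: a bulk that
 * regains its first free counter moves to partially_used, and a bulk that
 * becomes completely free is either freed back to the device or parked on
 * the unused list, depending on the threshold.
 */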
static void
mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
{
	struct mlx5_core_dev *dev = fc_pool->dev;
	struct mlx5_fc_bulk *bulk = fc->bulk;
	int bulk_free_fcs_amount;

	mutex_lock(&fc_pool->pool_lock);

	if (mlx5_fc_bulk_release_fc(bulk, fc)) {
		mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
		goto unlock;
	}

	fc_pool->available_fcs++;
	fc_pool->used_fcs--;

	bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
	if (bulk_free_fcs_amount == 1)
		list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
	if (bulk_free_fcs_amount == bulk->bulk_len) {
		list_del(&bulk->pool_list);
		if (fc_pool->available_fcs > fc_pool->threshold)
			mlx5_fc_pool_free_bulk(fc_pool, bulk);
		else
			list_add(&bulk->pool_list, &fc_pool->unused);
	}

unlock:
	mutex_unlock(&fc_pool->pool_lock);
}