/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in a bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)

/* locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls or bad
 * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
 * to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
 *
 * access to counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be consumed by
 *     mlx5_fc_stats_work(). addlist is a lockless singly linked list
 *     that doesn't require any additional synchronization when adding a
 *     single node.
 *   - spawn the work to do the actual add
 *
 * - destroy (user context)
 *   - mark a counter as deleted
 *   - spawn the work to do the actual del
 *
 * - dump (user context)
 *   user should not call dump after destroy
 *
 * - query (single thread workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (since update is not
 *                atomic)
 *   query/create - no conflict (see create)
 *   since every create/destroy spawns the work, the work will actually query
 *   the hardware only after the necessary time has elapsed.
 */

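/* Illustrative usage sketch (an assumed typical consumer, not code from
 * this file): allocate an aging counter, attach it to a flow rule,
 * periodically read the cached statistics, and destroy the counter when
 * the rule goes away. Per the locking scheme above, the caller is
 * responsible for serializing these calls.
 *
 *	struct mlx5_fc *counter;
 *	u64 bytes, packets, lastuse;
 *
 *	counter = mlx5_fc_create(dev, true);
 *	if (IS_ERR(counter))
 *		return PTR_ERR(counter);
 *	...
 *	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 *	...
 *	mlx5_fc_destroy(dev, counter);
 */

/* Insert a counter into the rb-tree of tracked counters, keyed by
 * counter id.
 */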
static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
		int result = counter->id - this->id;

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&counter->node, parent, new);
	rb_insert_color(&counter->node, root);
}

/* The function returns the first node that was not queried (or NULL once
 * all counters have been queried), so the caller can keep calling it
 * until the whole tree has been covered.
 */
static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
					   struct mlx5_fc *first,
					   u32 last_id)
{
	struct mlx5_cmd_fc_bulk *b;
	struct rb_node *node = NULL;
	u32 afirst_id;
	int num;
	int err;

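	/* The bulk query size is capped both by the 32K software limit
	 * and by the firmware capability (log_max_flow_counter_bulk).
	 */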
	int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
			     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));

	/* first id must be aligned to 4 when using bulk query */
	afirst_id = first->id & ~0x3;

	/* number of counters to query, including the last counter */
	num = ALIGN(last_id - afirst_id + 1, 4);
	if (num > max_bulk) {
		num = max_bulk;
		last_id = afirst_id + num - 1;
	}

	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
	if (!b) {
		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
		return NULL;
	}

	err = mlx5_cmd_fc_bulk_query(dev, b);
	if (err) {
		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
		goto out;
	}

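	/* Walk the tree from 'first', refreshing each counter's cache from
	 * the bulk query output; stop once past last_id. The cache is left
	 * untouched when the packet count has not changed.
	 */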
	for (node = &first->node; node; node = rb_next(node)) {
		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
		struct mlx5_fc_cache *c = &counter->cache;
		u64 packets;
		u64 bytes;

		if (counter->id > last_id)
			break;

		mlx5_cmd_fc_bulk_get(dev, b,
				     counter->id, &packets, &bytes);

		if (c->packets == packets)
			continue;

		c->packets = packets;
		c->bytes = bytes;
		c->lastuse = jiffies;
	}

out:
	mlx5_cmd_fc_bulk_free(b);

	return node;
}

static void mlx5_free_fc(struct mlx5_core_dev *dev,
			 struct mlx5_fc *counter)
{
	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}

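/* Periodic work: re-arms itself while there is anything to track, drains
 * the lockless addlist into the rb-tree, frees counters marked as
 * deleted, and, once the sampling interval has elapsed, bulk-queries the
 * hardware to refresh the cached statistics.
 */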
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct llist_node *tmplist = llist_del_all(&fc_stats->addlist);
	unsigned long now = jiffies;
	struct mlx5_fc *counter = NULL;
	struct mlx5_fc *last = NULL;
	struct rb_node *node;

	if (tmplist || !RB_EMPTY_ROOT(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   fc_stats->sampling_interval);

	llist_for_each_entry(counter, tmplist, addlist)
		mlx5_fc_stats_insert(&fc_stats->counters, counter);

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		if (counter->deleted) {
			rb_erase(&counter->node, &fc_stats->counters);

			mlx5_free_fc(dev, counter);
			continue;
		}

		last = counter;
	}

	if (time_before(now, fc_stats->next_query) || !last)
		return;

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = mlx5_fc_stats_query(dev, counter, last->id);
	}

	fc_stats->next_query = now + fc_stats->sampling_interval;
}

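/* Allocate a hardware flow counter. When @aging is true, the counter is
 * handed to the stats work via the addlist so its statistics get sampled
 * periodically into the software cache.
 */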
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err)
		goto err_out;

	if (aging) {
		counter->cache.lastuse = jiffies;
		counter->aging = true;

		llist_add(&counter->addlist, &fc_stats->addlist);

		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	}

	return counter;

err_out:
	kfree(counter);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);

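/* Release a flow counter. Aging counters are only marked as deleted
 * here; the stats work later removes them from the rb-tree and frees the
 * hardware resource. Non-aging counters are freed immediately.
 */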
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		counter->deleted = true;
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_free_fc(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);

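/* Initialize the per-device flow counter statistics state: the rb-tree
 * of tracked counters, the lockless addlist, and the single threaded
 * workqueue that runs the sampling work.
 */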
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->counters = RB_ROOT;
	init_llist_head(&fc_stats->addlist);

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		return -ENOMEM;

	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	return 0;
}

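/* Tear down the flow counter statistics state: stop the sampling work
 * and free every counter still on the addlist or in the rb-tree.
 */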
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct llist_node *tmplist;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;
	struct rb_node *node;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	tmplist = llist_del_all(&fc_stats->addlist);
	llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
		mlx5_free_fc(dev, counter);

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		rb_erase(&counter->node, &fc_stats->counters);

		mlx5_free_fc(dev, counter);
	}
}

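/* Query a single counter directly from hardware, bypassing the software
 * cache.
 */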
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes)
{
	return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);

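/* Return the traffic seen since the previous call (delta semantics),
 * using only the software cache; no hardware access is performed, so the
 * result is at most one sampling interval old.
 */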
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}

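/* Queue caller-provided delayed work on the flow counter workqueue;
 * since the workqueue is single threaded, it is serialized with the
 * sampling work.
 */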
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
			      struct delayed_work *dwork,
			      unsigned long delay)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	queue_delayed_work(fc_stats->wq, dwork, delay);
}

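/* Lower the sampling interval; the interval can only shrink here, a
 * longer requested interval is ignored.
 */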
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
				      unsigned long interval)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->sampling_interval = min_t(unsigned long, interval,
					    fc_stats->sampling_interval);
}