/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)

/* locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls or bad
 * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
 * to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
 *
 * access to counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be consumed by
 *     mlx5_fc_stats_work(). addlist is protected by a spinlock.
 *   - the work item is scheduled to do the actual addition to the
 *     counters tree.
 *
 * - destroy (user context)
 *   - mark the counter as deleted
 *   - the work item is scheduled to do the actual removal and free
 *
 * - dump (user context)
 *   the user must not call dump after destroy
 *
 * - query (single-threaded workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (since the update
 *                is not atomic)
 *   query/create - no conflict (see create)
 *   every create/destroy schedules the work immediately, but the worker only
 *   queries the hardware once the polling period (MLX5_FC_STATS_PERIOD) has
 *   elapsed.
 *
 * (An illustrative caller-side usage sketch follows this comment.)
 */
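
/*
 * A minimal caller-side sketch of the intended counter lifecycle, assuming a
 * hypothetical caller; the flow-rule attachment step is illustrative and not
 * part of this file:
 *
 *	struct mlx5_fc *fc;
 *	u64 bytes, packets, lastuse;
 *
 *	fc = mlx5_fc_create(dev, true);
 *	if (IS_ERR(fc))
 *		return PTR_ERR(fc);
 *
 *	... attach fc to a flow rule as its flow counter ...
 *
 *	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
 *
 *	mlx5_fc_destroy(dev, fc);
 *
 * mlx5_fc_create(dev, true) requests an aging counter, so the work item below
 * polls it periodically; mlx5_fc_query_cached() returns the deltas accumulated
 * since the previous call, and mlx5_fc_destroy() defers the actual hardware
 * free to the work item.
 */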

static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);

		parent = *new;
		/* order strictly by counter id; avoid the signed overflow a
		 * plain id subtraction could hit for widely spaced ids
		 */
		if (counter->id < this->id)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&counter->node, parent, new);
	rb_insert_color(&counter->node, root);
}
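
/*
 * For reference, a lookup keyed the same way would walk the tree by counter
 * id. This helper is a hypothetical sketch only and is not used in this file:
 *
 *	static struct mlx5_fc *mlx5_fc_stats_lookup(struct rb_root *root,
 *						    u32 id)
 *	{
 *		struct rb_node *node = root->rb_node;
 *
 *		while (node) {
 *			struct mlx5_fc *this = rb_entry(node, struct mlx5_fc,
 *							node);
 *
 *			if (id < this->id)
 *				node = node->rb_left;
 *			else if (id > this->id)
 *				node = node->rb_right;
 *			else
 *				return this;
 *		}
 *
 *		return NULL;
 *	}
 */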

static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	unsigned long now = jiffies;
	struct mlx5_fc *counter;
	struct rb_node *node;
	LIST_HEAD(tmplist);
	int err = 0;

	spin_lock(&fc_stats->addlist_lock);

	list_splice_tail_init(&fc_stats->addlist, &tmplist);

	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   MLX5_FC_STATS_PERIOD);

	spin_unlock(&fc_stats->addlist_lock);

	list_for_each_entry(counter, &tmplist, list)
		mlx5_fc_stats_insert(&fc_stats->counters, counter);

	node = rb_first(&fc_stats->counters);
	while (node) {
		struct mlx5_fc_cache *c;
		u64 packets;
		u64 bytes;

		counter = rb_entry(node, struct mlx5_fc, node);
		c = &counter->cache;

		node = rb_next(node);

		if (counter->deleted) {
			rb_erase(&counter->node, &fc_stats->counters);

			mlx5_cmd_fc_free(dev, counter->id);

			kfree(counter);
			continue;
		}

		if (time_before(now, fc_stats->next_query))
			continue;

		err = mlx5_cmd_fc_query(dev, counter->id, &packets, &bytes);
		if (err) {
			pr_err("Error querying stats for counter id %d\n",
			       counter->id);
			continue;
		}

		if (packets == c->packets)
			continue;

		c->lastuse = jiffies;
		c->packets = packets;
		c->bytes   = bytes;
	}

	if (time_after_eq(now, fc_stats->next_query))
		fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
}

struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err)
		goto err_out;

	if (aging) {
		counter->aging = true;

		spin_lock(&fc_stats->addlist_lock);
		list_add(&counter->list, &fc_stats->addlist);
		spin_unlock(&fc_stats->addlist_lock);

		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	}

	return counter;

err_out:
	kfree(counter);

	return ERR_PTR(err);
}

void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		counter->deleted = true;
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}

int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->counters = RB_ROOT;
	INIT_LIST_HEAD(&fc_stats->addlist);
	spin_lock_init(&fc_stats->addlist_lock);

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	return 0;
}

void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;
	struct rb_node *node;

	cancel_delayed_work_sync(&fc_stats->work);
	destroy_workqueue(fc_stats->wq);
	fc_stats->wq = NULL;

	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
		list_del(&counter->list);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		rb_erase(&counter->node, &fc_stats->counters);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}
}

void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}
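
/*
 * Example of the delta semantics above (the numbers are illustrative): if the
 * cache holds packets = 100 at the first call and packets = 130 at the next,
 * the two calls report *packets = 100 and *packets = 30 respectively, i.e.
 * each call returns only the traffic seen since the previous call:
 *
 *	u64 bytes, packets, lastuse;
 *
 *	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 *	...
 *	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 */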