1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3 
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/spinlock.h>

#include "spectrum_cnt.h"
9 
10 #define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096
11 
/* One sub-pool carved out of the device counter pool. bank_count is
 * configured statically in mlxsw_sp_counter_sub_pools[]; the other
 * fields are computed at pool init time.
 */
struct mlxsw_sp_counter_sub_pool {
	unsigned int base_index; /* First pool entry owned by this sub-pool */
	unsigned int size; /* Entries in sub-pool; last bank may be partial */
	unsigned int entry_size; /* Pool entries consumed per counter */
	unsigned int bank_count; /* Static number of HW banks assigned */
};
18 
/* Runtime state of the whole device counter pool. */
struct mlxsw_sp_counter_pool {
	unsigned int pool_size; /* Total entries, from COUNTER_POOL_SIZE */
	unsigned long *usage; /* Usage bitmap */
	spinlock_t counter_pool_lock; /* Protects counter pool allocations */
	struct mlxsw_sp_counter_sub_pool *sub_pools;
};
25 
/* Static bank split between the sub-pools; the remaining fields are
 * filled in by mlxsw_sp_counter_sub_pools_prepare() and
 * mlxsw_sp_counter_pool_init().
 */
static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
	[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
		.bank_count = 6,
	},
	[MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
		.bank_count = 2,
	}
};
34 
35 static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
36 {
37 	unsigned int total_bank_config = 0;
38 	unsigned int pool_size;
39 	int i;
40 
41 	pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
42 	/* Check config is valid, no bank over subscription */
43 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
44 		total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
45 	if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
46 		return -EINVAL;
47 	return 0;
48 }
49 
50 static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
51 {
52 	struct mlxsw_sp_counter_sub_pool *sub_pool;
53 
54 	/* Prepare generic flow pool*/
55 	sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
56 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
57 		return -EIO;
58 	sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
59 						  COUNTER_SIZE_PACKETS_BYTES);
60 	/* Prepare erif pool*/
61 	sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
62 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC))
63 		return -EIO;
64 	sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
65 						  COUNTER_SIZE_ROUTER_BASIC);
66 	return 0;
67 }
68 
69 int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
70 {
71 	struct mlxsw_sp_counter_sub_pool *sub_pool;
72 	struct mlxsw_sp_counter_pool *pool;
73 	unsigned int base_index;
74 	unsigned int map_size;
75 	int i;
76 	int err;
77 
78 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
79 		return -EIO;
80 
81 	err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
82 	if (err)
83 		return err;
84 
85 	err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
86 	if (err)
87 		return err;
88 
89 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
90 	if (!pool)
91 		return -ENOMEM;
92 	spin_lock_init(&pool->counter_pool_lock);
93 
94 	pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
95 	map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
96 
97 	pool->usage = kzalloc(map_size, GFP_KERNEL);
98 	if (!pool->usage) {
99 		err = -ENOMEM;
100 		goto err_usage_alloc;
101 	}
102 
103 	pool->sub_pools = mlxsw_sp_counter_sub_pools;
104 	/* Allocation is based on bank count which should be
105 	 * specified for each sub pool statically.
106 	 */
107 	base_index = 0;
108 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
109 		sub_pool = &pool->sub_pools[i];
110 		sub_pool->size = sub_pool->bank_count *
111 				 MLXSW_SP_COUNTER_POOL_BANK_SIZE;
112 		sub_pool->base_index = base_index;
113 		base_index += sub_pool->size;
114 		/* The last bank can't be fully used */
115 		if (sub_pool->base_index + sub_pool->size > pool->pool_size)
116 			sub_pool->size = pool->pool_size - sub_pool->base_index;
117 	}
118 
119 	mlxsw_sp->counter_pool = pool;
120 	return 0;
121 
122 err_usage_alloc:
123 	kfree(pool);
124 	return err;
125 }
126 
127 void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
128 {
129 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
130 
131 	WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
132 			       pool->pool_size);
133 	kfree(pool->usage);
134 	kfree(pool);
135 }
136 
137 int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
138 			   enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
139 			   unsigned int *p_counter_index)
140 {
141 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
142 	struct mlxsw_sp_counter_sub_pool *sub_pool;
143 	unsigned int entry_index;
144 	unsigned int stop_index;
145 	int i, err;
146 
147 	sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
148 	stop_index = sub_pool->base_index + sub_pool->size;
149 	entry_index = sub_pool->base_index;
150 
151 	spin_lock(&pool->counter_pool_lock);
152 	entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
153 	if (entry_index == stop_index) {
154 		err = -ENOBUFS;
155 		goto err_alloc;
156 	}
157 	/* The sub-pools can contain non-integer number of entries
158 	 * so we must check for overflow
159 	 */
160 	if (entry_index + sub_pool->entry_size > stop_index) {
161 		err = -ENOBUFS;
162 		goto err_alloc;
163 	}
164 	for (i = 0; i < sub_pool->entry_size; i++)
165 		__set_bit(entry_index + i, pool->usage);
166 	spin_unlock(&pool->counter_pool_lock);
167 
168 	*p_counter_index = entry_index;
169 	return 0;
170 
171 err_alloc:
172 	spin_unlock(&pool->counter_pool_lock);
173 	return err;
174 }
175 
176 void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
177 			   enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
178 			   unsigned int counter_index)
179 {
180 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
181 	struct mlxsw_sp_counter_sub_pool *sub_pool;
182 	int i;
183 
184 	if (WARN_ON(counter_index >= pool->pool_size))
185 		return;
186 	sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
187 	spin_lock(&pool->counter_pool_lock);
188 	for (i = 0; i < sub_pool->entry_size; i++)
189 		__clear_bit(counter_index + i, pool->usage);
190 	spin_unlock(&pool->counter_pool_lock);
191 }
192