1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/bitops.h>
6 #include <linux/spinlock.h>
7 
8 #include "spectrum_cnt.h"
9 
/* A sub-pool is a contiguous slice of the main counter pool dedicated to one
 * consumer type (flow counters, RIF counters). Its occupancy is exported to
 * devlink as a separate resource.
 */
struct mlxsw_sp_counter_sub_pool {
	u64 size; /* Number of pool entries assigned to this sub-pool */
	unsigned int base_index; /* First entry of this sub-pool in the pool bitmap */
	enum mlxsw_res_id entry_size_res_id; /* Device resource that reports entry size */
	const char *resource_name; /* devlink resource name */
	u64 resource_id; /* devlink resource id */
	unsigned int entry_size; /* Pool entries consumed by one allocation */
	unsigned int bank_count; /* Counter banks statically assigned to this sub-pool */
	atomic_t active_entries_count; /* Current occupancy, reported to devlink */
};
20 
/* Top-level counter pool state. Allocation is tracked with a single usage
 * bitmap covering the whole pool; sub-pools carve out index ranges of it.
 */
struct mlxsw_sp_counter_pool {
	u64 pool_size; /* Total number of counter entries in the device pool */
	unsigned long *usage; /* Usage bitmap */
	spinlock_t counter_pool_lock; /* Protects counter pool allocations */
	atomic_t active_entries_count; /* Pool-wide occupancy, reported to devlink */
	unsigned int sub_pools_count; /* Number of elements in sub_pools[] */
	struct mlxsw_sp_counter_sub_pool sub_pools[]; /* Per-consumer sub-pools */
};
29 
/* Static sub-pool configuration template, copied into the runtime pool at
 * init time. Sizing is expressed in counter banks; the flow sub-pool gets
 * three times the banks of the RIF sub-pool.
 */
static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
	[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
		.entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
		.resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_FLOW,
		.resource_id = MLXSW_SP_RESOURCE_COUNTERS_FLOW,
		.bank_count = 6,
	},
	[MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
		.entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
		.resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF,
		.resource_id = MLXSW_SP_RESOURCE_COUNTERS_RIF,
		.bank_count = 2,
	}
};
44 
45 static u64 mlxsw_sp_counter_sub_pool_occ_get(void *priv)
46 {
47 	const struct mlxsw_sp_counter_sub_pool *sub_pool = priv;
48 
49 	return atomic_read(&sub_pool->active_entries_count);
50 }
51 
/* Finalize each sub-pool: read its entry size from device resources, fetch
 * its (possibly user-resized) size from devlink, register its occupancy
 * callback and lay the sub-pools out back-to-back in the pool's index space.
 *
 * Returns 0 on success or a negative errno; on failure all occupancy
 * callbacks registered so far are unregistered again.
 */
static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_counter_sub_pool *sub_pool;
	unsigned int base_index = 0;
	enum mlxsw_res_id res_id;
	int err;
	int i;

	for (i = 0; i < pool->sub_pools_count; i++) {
		sub_pool = &pool->sub_pools[i];
		res_id = sub_pool->entry_size_res_id;

		/* Entry size comes from device capabilities; without it the
		 * sub-pool cannot be used at all.
		 */
		if (!mlxsw_core_res_valid(mlxsw_sp->core, res_id))
			return -EIO;
		sub_pool->entry_size = mlxsw_core_res_get(mlxsw_sp->core,
							  res_id);
		/* The size may have been changed by the user via devlink
		 * resource; read the current value rather than the default.
		 */
		err = devlink_resource_size_get(devlink,
						sub_pool->resource_id,
						&sub_pool->size);
		if (err)
			goto err_resource_size_get;

		devlink_resource_occ_get_register(devlink,
						  sub_pool->resource_id,
						  mlxsw_sp_counter_sub_pool_occ_get,
						  sub_pool);

		/* Sub-pools occupy consecutive index ranges of the pool. */
		sub_pool->base_index = base_index;
		base_index += sub_pool->size;
		atomic_set(&sub_pool->active_entries_count, 0);
	}
	return 0;

err_resource_size_get:
	/* Unwind: unregister occupancy callbacks of the sub-pools that were
	 * already set up (i failed before registering its own).
	 */
	for (i--; i >= 0; i--) {
		sub_pool = &pool->sub_pools[i];

		devlink_resource_occ_get_unregister(devlink,
						    sub_pool->resource_id);
	}
	return err;
}
96 
97 static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
98 {
99 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
100 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
101 	struct mlxsw_sp_counter_sub_pool *sub_pool;
102 	int i;
103 
104 	for (i = 0; i < pool->sub_pools_count; i++) {
105 		sub_pool = &pool->sub_pools[i];
106 
107 		WARN_ON(atomic_read(&sub_pool->active_entries_count));
108 		devlink_resource_occ_get_unregister(devlink,
109 						    sub_pool->resource_id);
110 	}
111 }
112 
113 static u64 mlxsw_sp_counter_pool_occ_get(void *priv)
114 {
115 	const struct mlxsw_sp_counter_pool *pool = priv;
116 
117 	return atomic_read(&pool->active_entries_count);
118 }
119 
/* Allocate and initialize the counter pool: copy the static sub-pool
 * template, read the pool size from devlink, register the pool-wide
 * occupancy callback, allocate the usage bitmap and set up the sub-pools.
 *
 * Returns 0 on success or a negative errno; on failure everything acquired
 * so far is released in reverse order.
 */
int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_counter_pool *pool;
	unsigned int map_size;
	int err;

	/* Pool struct and sub-pool array are allocated as one object. */
	pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),
		       GFP_KERNEL);
	if (!pool)
		return -ENOMEM;
	mlxsw_sp->counter_pool = pool;
	pool->sub_pools_count = sub_pools_count;
	/* Seed the runtime sub-pools from the static configuration table. */
	memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,
	       flex_array_size(pool, sub_pools, pool->sub_pools_count));
	spin_lock_init(&pool->counter_pool_lock);
	atomic_set(&pool->active_entries_count, 0);

	/* Pool size may have been changed by the user via devlink resource. */
	err = devlink_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
					&pool->pool_size);
	if (err)
		goto err_pool_resource_size_get;
	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
					  mlxsw_sp_counter_pool_occ_get, pool);

	/* One bit per counter entry, rounded up to whole longs. */
	map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);

	pool->usage = kzalloc(map_size, GFP_KERNEL);
	if (!pool->usage) {
		err = -ENOMEM;
		goto err_usage_alloc;
	}

	err = mlxsw_sp_counter_sub_pools_init(mlxsw_sp);
	if (err)
		goto err_sub_pools_init;

	return 0;

err_sub_pools_init:
	kfree(pool->usage);
err_usage_alloc:
	devlink_resource_occ_get_unregister(devlink,
					    MLXSW_SP_RESOURCE_COUNTERS);
err_pool_resource_size_get:
	kfree(pool);
	return err;
}
169 
/* Release the counter pool, undoing mlxsw_sp_counter_pool_init() in reverse
 * order. Warns if any counter is still allocated at teardown time.
 */
void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
	/* Bitmap must be empty: find_first_bit() returns pool_size only when
	 * no bit is set, i.e. every counter was freed.
	 */
	WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
			       pool->pool_size);
	WARN_ON(atomic_read(&pool->active_entries_count));
	kfree(pool->usage);
	devlink_resource_occ_get_unregister(devlink,
					    MLXSW_SP_RESOURCE_COUNTERS);
	kfree(pool);
}
184 
/* Allocate one counter from the given sub-pool.
 *
 * Searches the usage bitmap for a free slot inside the sub-pool's index
 * range and marks entry_size consecutive bits as used. On success stores
 * the first entry index in *p_counter_index and returns 0; returns
 * -ENOBUFS when the sub-pool is exhausted.
 */
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
			   unsigned int *p_counter_index)
{
	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
	struct mlxsw_sp_counter_sub_pool *sub_pool;
	unsigned int entry_index;
	unsigned int stop_index;
	int i, err;

	sub_pool = &pool->sub_pools[sub_pool_id];
	/* Constrain the search to this sub-pool's slice of the bitmap. */
	stop_index = sub_pool->base_index + sub_pool->size;
	entry_index = sub_pool->base_index;

	spin_lock(&pool->counter_pool_lock);
	entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
	if (entry_index == stop_index) {
		err = -ENOBUFS;
		goto err_alloc;
	}
	/* The sub-pools can contain non-integer number of entries
	 * so we must check for overflow
	 */
	if (entry_index + sub_pool->entry_size > stop_index) {
		err = -ENOBUFS;
		goto err_alloc;
	}
	/* Claim all bits of the entry; __set_bit is safe under the lock. */
	for (i = 0; i < sub_pool->entry_size; i++)
		__set_bit(entry_index + i, pool->usage);
	spin_unlock(&pool->counter_pool_lock);

	*p_counter_index = entry_index;
	/* Occupancy counters are updated outside the lock; they are only
	 * read for devlink reporting.
	 */
	atomic_add(sub_pool->entry_size, &sub_pool->active_entries_count);
	atomic_add(sub_pool->entry_size, &pool->active_entries_count);
	return 0;

err_alloc:
	spin_unlock(&pool->counter_pool_lock);
	return err;
}
225 
226 void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
227 			   enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
228 			   unsigned int counter_index)
229 {
230 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
231 	struct mlxsw_sp_counter_sub_pool *sub_pool;
232 	int i;
233 
234 	if (WARN_ON(counter_index >= pool->pool_size))
235 		return;
236 	sub_pool = &pool->sub_pools[sub_pool_id];
237 	spin_lock(&pool->counter_pool_lock);
238 	for (i = 0; i < sub_pool->entry_size; i++)
239 		__clear_bit(counter_index + i, pool->usage);
240 	spin_unlock(&pool->counter_pool_lock);
241 	atomic_sub(sub_pool->entry_size, &sub_pool->active_entries_count);
242 	atomic_sub(sub_pool->entry_size, &pool->active_entries_count);
243 }
244 
/* Register the counter pool and its sub-pools as devlink resources, sized
 * from the device's reported pool and bank sizes. Called at driver
 * registration time, before the pool itself is initialized.
 *
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
{
	/* static keeps this struct off the stack; presumably safe because
	 * resource registration is not reentrant — NOTE(review): confirm
	 * this path is serialized.
	 */
	static struct devlink_resource_size_params size_params;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	const struct mlxsw_sp_counter_sub_pool *sub_pool;
	unsigned int total_bank_config;
	u64 sub_pool_size;
	u64 base_index;
	u64 pool_size;
	u64 bank_size;
	int err;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_POOL_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_BANK_SIZE))
		return -EIO;

	pool_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_POOL_SIZE);
	bank_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_BANK_SIZE);

	/* The pool is fixed-size: min == max == pool_size, granularity is
	 * one bank.
	 */
	devlink_resource_size_params_init(&size_params, pool_size,
					  pool_size, bank_size,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	err = devlink_resource_register(devlink,
					MLXSW_SP_RESOURCE_NAME_COUNTERS,
					pool_size,
					MLXSW_SP_RESOURCE_COUNTERS,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&size_params);
	if (err)
		return err;

	/* Allocation is based on bank count which should be
	 * specified for each sub pool statically.
	 */
	total_bank_config = 0;
	base_index = 0;
	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
		sub_pool = &mlxsw_sp_counter_sub_pools[i];
		sub_pool_size = sub_pool->bank_count * bank_size;
		/* The last bank can't be fully used */
		if (base_index + sub_pool_size > pool_size)
			sub_pool_size = pool_size - base_index;
		base_index += sub_pool_size;

		devlink_resource_size_params_init(&size_params, sub_pool_size,
						  sub_pool_size, bank_size,
						  DEVLINK_RESOURCE_UNIT_ENTRY);
		/* Sub-pool resources are registered as children of the
		 * top-level counters resource.
		 */
		err = devlink_resource_register(devlink,
						sub_pool->resource_name,
						sub_pool_size,
						sub_pool->resource_id,
						MLXSW_SP_RESOURCE_COUNTERS,
						&size_params);
		if (err)
			return err;
		total_bank_config += sub_pool->bank_count;
	}

	/* Check config is valid, no bank over subscription */
	if (WARN_ON(total_bank_config > div64_u64(pool_size, bank_size) + 1))
		return -EINVAL;

	return 0;
}
310