1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/bitops.h>
6 #include <linux/spinlock.h>
7 
8 #include "spectrum_cnt.h"
9 
/* Per-consumer slice of the flat counter pool. Each sub-pool owns a
 * contiguous range of pool entries starting at base_index.
 */
struct mlxsw_sp_counter_sub_pool {
	u64 size; /* Number of pool entries in this sub-pool's range */
	unsigned int base_index; /* First pool entry owned by this sub-pool */
	enum mlxsw_res_id entry_size_res_id; /* Device resource holding entry size */
	const char *resource_name; /* devlink resource name */
	u64 resource_id; /* devlink resource id */
	unsigned int entry_size; /* Pool entries consumed per allocation */
	unsigned int bank_count; /* Counter banks used to size this sub-pool */
	atomic_t active_entries_count; /* Occupancy reported to devlink */
};
20 
/* Top-level counter pool state; one instance per mlxsw_sp device. */
struct mlxsw_sp_counter_pool {
	u64 pool_size; /* Total number of entries in the pool */
	unsigned long *usage; /* Usage bitmap */
	spinlock_t counter_pool_lock; /* Protects counter pool allocations */
	atomic_t active_entries_count; /* Pool-wide occupancy for devlink */
	unsigned int sub_pools_count; /* Number of elements in sub_pools[] */
	struct mlxsw_sp_counter_sub_pool sub_pools[]; /* Flexible array member */
};
29 
/* Static sub-pool templates, copied into the runtime pool at init time.
 * Only the fields below are fixed here; size, base_index and entry_size
 * are filled in later from device / devlink resources. bank_count drives
 * the default sizing in mlxsw_sp_counter_resources_register().
 */
static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
	[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
		.entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
		.resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_FLOW,
		.resource_id = MLXSW_SP_RESOURCE_COUNTERS_FLOW,
		.bank_count = 6,
	},
	[MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
		.entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
		.resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF,
		.resource_id = MLXSW_SP_RESOURCE_COUNTERS_RIF,
		.bank_count = 2,
	}
};
44 
45 static u64 mlxsw_sp_counter_sub_pool_occ_get(void *priv)
46 {
47 	const struct mlxsw_sp_counter_sub_pool *sub_pool = priv;
48 
49 	return atomic_read(&sub_pool->active_entries_count);
50 }
51 
52 static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
53 {
54 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
55 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
56 	struct mlxsw_sp_counter_sub_pool *sub_pool;
57 	unsigned int base_index = 0;
58 	enum mlxsw_res_id res_id;
59 	int err;
60 	int i;
61 
62 	for (i = 0; i < pool->sub_pools_count; i++) {
63 		sub_pool = &pool->sub_pools[i];
64 		res_id = sub_pool->entry_size_res_id;
65 
66 		if (!mlxsw_core_res_valid(mlxsw_sp->core, res_id))
67 			return -EIO;
68 		sub_pool->entry_size = mlxsw_core_res_get(mlxsw_sp->core,
69 							  res_id);
70 		err = devlink_resource_size_get(devlink,
71 						sub_pool->resource_id,
72 						&sub_pool->size);
73 		if (err)
74 			goto err_resource_size_get;
75 
76 		devlink_resource_occ_get_register(devlink,
77 						  sub_pool->resource_id,
78 						  mlxsw_sp_counter_sub_pool_occ_get,
79 						  sub_pool);
80 
81 		sub_pool->base_index = base_index;
82 		base_index += sub_pool->size;
83 		atomic_set(&sub_pool->active_entries_count, 0);
84 	}
85 	return 0;
86 
87 err_resource_size_get:
88 	for (i--; i >= 0; i--) {
89 		sub_pool = &pool->sub_pools[i];
90 
91 		devlink_resource_occ_get_unregister(devlink,
92 						    sub_pool->resource_id);
93 	}
94 	return err;
95 }
96 
97 static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
98 {
99 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
100 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
101 	struct mlxsw_sp_counter_sub_pool *sub_pool;
102 	int i;
103 
104 	for (i = 0; i < pool->sub_pools_count; i++) {
105 		sub_pool = &pool->sub_pools[i];
106 
107 		WARN_ON(atomic_read(&sub_pool->active_entries_count));
108 		devlink_resource_occ_get_unregister(devlink,
109 						    sub_pool->resource_id);
110 	}
111 }
112 
113 static u64 mlxsw_sp_counter_pool_occ_get(void *priv)
114 {
115 	const struct mlxsw_sp_counter_pool *pool = priv;
116 
117 	return atomic_read(&pool->active_entries_count);
118 }
119 
/* Allocate the counter pool and bring it into service.
 *
 * Sequence: allocate pool state, seed the sub-pools from the static
 * templates, read the pool size from devlink, register the pool-wide
 * occupancy getter, allocate the usage bitmap, then initialize the
 * sub-pools. The error path unwinds in exact reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_counter_sub_pool *sub_pool;
	struct mlxsw_sp_counter_pool *pool;
	unsigned int map_size;
	int err;

	/* struct_size() accounts for the trailing sub_pools[] array. */
	pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),
		       GFP_KERNEL);
	if (!pool)
		return -ENOMEM;
	mlxsw_sp->counter_pool = pool;
	/* Copy the static templates; size/base_index/entry_size are
	 * filled in later by mlxsw_sp_counter_sub_pools_init().
	 */
	memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,
	       sub_pools_count * sizeof(*sub_pool));
	pool->sub_pools_count = sub_pools_count;
	spin_lock_init(&pool->counter_pool_lock);
	atomic_set(&pool->active_entries_count, 0);

	/* The pool size comes from devlink, not straight from hardware,
	 * so any user resize of the resource is honored.
	 */
	err = devlink_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
					&pool->pool_size);
	if (err)
		goto err_pool_resource_size_get;
	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
					  mlxsw_sp_counter_pool_occ_get, pool);

	/* One usage bit per pool entry, rounded up to whole longs. */
	map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);

	pool->usage = kzalloc(map_size, GFP_KERNEL);
	if (!pool->usage) {
		err = -ENOMEM;
		goto err_usage_alloc;
	}

	err = mlxsw_sp_counter_sub_pools_init(mlxsw_sp);
	if (err)
		goto err_sub_pools_init;

	return 0;

err_sub_pools_init:
	kfree(pool->usage);
err_usage_alloc:
	devlink_resource_occ_get_unregister(devlink,
					    MLXSW_SP_RESOURCE_COUNTERS);
err_pool_resource_size_get:
	kfree(pool);
	return err;
}
170 
171 void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
172 {
173 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
174 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
175 
176 	mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
177 	WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
178 			       pool->pool_size);
179 	WARN_ON(atomic_read(&pool->active_entries_count));
180 	kfree(pool->usage);
181 	devlink_resource_occ_get_unregister(devlink,
182 					    MLXSW_SP_RESOURCE_COUNTERS);
183 	kfree(pool);
184 }
185 
186 int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
187 			   enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
188 			   unsigned int *p_counter_index)
189 {
190 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
191 	struct mlxsw_sp_counter_sub_pool *sub_pool;
192 	unsigned int entry_index;
193 	unsigned int stop_index;
194 	int i, err;
195 
196 	sub_pool = &pool->sub_pools[sub_pool_id];
197 	stop_index = sub_pool->base_index + sub_pool->size;
198 	entry_index = sub_pool->base_index;
199 
200 	spin_lock(&pool->counter_pool_lock);
201 	entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
202 	if (entry_index == stop_index) {
203 		err = -ENOBUFS;
204 		goto err_alloc;
205 	}
206 	/* The sub-pools can contain non-integer number of entries
207 	 * so we must check for overflow
208 	 */
209 	if (entry_index + sub_pool->entry_size > stop_index) {
210 		err = -ENOBUFS;
211 		goto err_alloc;
212 	}
213 	for (i = 0; i < sub_pool->entry_size; i++)
214 		__set_bit(entry_index + i, pool->usage);
215 	spin_unlock(&pool->counter_pool_lock);
216 
217 	*p_counter_index = entry_index;
218 	atomic_add(sub_pool->entry_size, &sub_pool->active_entries_count);
219 	atomic_add(sub_pool->entry_size, &pool->active_entries_count);
220 	return 0;
221 
222 err_alloc:
223 	spin_unlock(&pool->counter_pool_lock);
224 	return err;
225 }
226 
227 void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
228 			   enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
229 			   unsigned int counter_index)
230 {
231 	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
232 	struct mlxsw_sp_counter_sub_pool *sub_pool;
233 	int i;
234 
235 	if (WARN_ON(counter_index >= pool->pool_size))
236 		return;
237 	sub_pool = &pool->sub_pools[sub_pool_id];
238 	spin_lock(&pool->counter_pool_lock);
239 	for (i = 0; i < sub_pool->entry_size; i++)
240 		__clear_bit(counter_index + i, pool->usage);
241 	spin_unlock(&pool->counter_pool_lock);
242 	atomic_sub(sub_pool->entry_size, &sub_pool->active_entries_count);
243 	atomic_sub(sub_pool->entry_size, &pool->active_entries_count);
244 }
245 
/* Register the counter pool and per-sub-pool devlink resources so user
 * space can inspect and resize them. Sub-pool sizes default to
 * bank_count whole banks each, laid out back-to-back; the last range is
 * clamped so the total never exceeds the pool size.
 *
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
{
	/* NOTE(review): static presumably to keep the params struct off the
	 * stack; it is rewritten by each devlink_resource_size_params_init()
	 * call below, so this relies on registration not running
	 * concurrently for multiple devices — confirm against callers.
	 */
	static struct devlink_resource_size_params size_params;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	const struct mlxsw_sp_counter_sub_pool *sub_pool;
	unsigned int total_bank_config;
	u64 sub_pool_size;
	u64 base_index;
	u64 pool_size;
	u64 bank_size;
	int err;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_POOL_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_BANK_SIZE))
		return -EIO;

	pool_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_POOL_SIZE);
	bank_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_BANK_SIZE);

	/* The pool resource is fixed at pool_size (min == max) and can
	 * only be resized in bank_size granularity.
	 */
	devlink_resource_size_params_init(&size_params, pool_size,
					  pool_size, bank_size,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	err = devlink_resource_register(devlink,
					MLXSW_SP_RESOURCE_NAME_COUNTERS,
					pool_size,
					MLXSW_SP_RESOURCE_COUNTERS,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&size_params);
	if (err)
		return err;

	/* Allocation is based on bank count which should be
	 * specified for each sub pool statically.
	 */
	total_bank_config = 0;
	base_index = 0;
	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
		sub_pool = &mlxsw_sp_counter_sub_pools[i];
		sub_pool_size = sub_pool->bank_count * bank_size;
		/* The last bank can't be fully used */
		if (base_index + sub_pool_size > pool_size)
			sub_pool_size = pool_size - base_index;
		base_index += sub_pool_size;

		/* Sub-pool resources are children of the pool resource. */
		devlink_resource_size_params_init(&size_params, sub_pool_size,
						  sub_pool_size, bank_size,
						  DEVLINK_RESOURCE_UNIT_ENTRY);
		err = devlink_resource_register(devlink,
						sub_pool->resource_name,
						sub_pool_size,
						sub_pool->resource_id,
						MLXSW_SP_RESOURCE_COUNTERS,
						&size_params);
		if (err)
			return err;
		total_bank_config += sub_pool->bank_count;
	}

	/* Check config is valid, no bank over subscription; the +1 allows
	 * the clamped last sub-pool to end inside a partial bank.
	 */
	if (WARN_ON(total_bank_config > div64_u64(pool_size, bank_size) + 1))
		return -EINVAL;

	return 0;
}
311