// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#include "spectrum_cnt.h"

struct mlxsw_sp_counter_sub_pool {
	u64 size;
	unsigned int base_index;
	enum mlxsw_res_id entry_size_res_id;
	const char *resource_name; /* devlink resource name */
	u64 resource_id; /* devlink resource id */
	unsigned int entry_size;
	unsigned int bank_count;
	atomic_t active_entries_count;
};

struct mlxsw_sp_counter_pool {
	u64 pool_size;
	unsigned long *usage; /* Usage bitmap */
	spinlock_t counter_pool_lock; /* Protects counter pool allocations */
	atomic_t active_entries_count;
	unsigned int sub_pools_count;
	struct mlxsw_sp_counter_sub_pool sub_pools[];
};

static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
	[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
		.entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
		.resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_FLOW,
		.resource_id = MLXSW_SP_RESOURCE_COUNTERS_FLOW,
		.bank_count = 6,
	},
	[MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
		.entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
		.resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF,
		.resource_id = MLXSW_SP_RESOURCE_COUNTERS_RIF,
		.bank_count = 2,
	}
};

static u64 mlxsw_sp_counter_sub_pool_occ_get(void *priv)
{
	const struct mlxsw_sp_counter_sub_pool *sub_pool = priv;

	return atomic_read(&sub_pool->active_entries_count);
}

static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_counter_sub_pool *sub_pool;
	unsigned int base_index = 0;
	enum mlxsw_res_id res_id;
	int err;
	int i;

	for (i = 0; i < pool->sub_pools_count; i++) {
		sub_pool = &pool->sub_pools[i];
		res_id = sub_pool->entry_size_res_id;

		if (!mlxsw_core_res_valid(mlxsw_sp->core, res_id))
			return -EIO;
		sub_pool->entry_size = mlxsw_core_res_get(mlxsw_sp->core,
							  res_id);
		err = devlink_resource_size_get(devlink,
						sub_pool->resource_id,
						&sub_pool->size);
		if (err)
			goto err_resource_size_get;

		devlink_resource_occ_get_register(devlink,
						  sub_pool->resource_id,
						  mlxsw_sp_counter_sub_pool_occ_get,
						  sub_pool);

		sub_pool->base_index = base_index;
		base_index += sub_pool->size;
		atomic_set(&sub_pool->active_entries_count, 0);
	}
	return 0;

err_resource_size_get:
	for (i--; i >= 0; i--) {
		sub_pool = &pool->sub_pools[i];

		devlink_resource_occ_get_unregister(devlink,
						    sub_pool->resource_id);
	}
	return err;
}

static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_counter_sub_pool *sub_pool;
	int i;

	for (i = 0; i < pool->sub_pools_count; i++) {
		sub_pool = &pool->sub_pools[i];

		WARN_ON(atomic_read(&sub_pool->active_entries_count));
		devlink_resource_occ_get_unregister(devlink,
						    sub_pool->resource_id);
	}
}

static u64 mlxsw_sp_counter_pool_occ_get(void *priv)
{
	const struct mlxsw_sp_counter_pool *pool = priv;

	return atomic_read(&pool->active_entries_count);
}

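/* Set up the shared counter pool: allocate the pool structure, copy the
 * static sub-pool templates, size the pool from its devlink resource,
 * register the occupancy getter, allocate the usage bitmap and then carve
 * out the sub-pools.
 */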
int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_counter_pool *pool;
	int err;

	pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),
		       GFP_KERNEL);
	if (!pool)
		return -ENOMEM;
	mlxsw_sp->counter_pool = pool;
	pool->sub_pools_count = sub_pools_count;
	memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,
	       flex_array_size(pool, sub_pools, pool->sub_pools_count));
	spin_lock_init(&pool->counter_pool_lock);
	atomic_set(&pool->active_entries_count, 0);

	err = devlink_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
					&pool->pool_size);
	if (err)
		goto err_pool_resource_size_get;
	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
					  mlxsw_sp_counter_pool_occ_get, pool);

	pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);
	if (!pool->usage) {
		err = -ENOMEM;
		goto err_usage_alloc;
	}

	err = mlxsw_sp_counter_sub_pools_init(mlxsw_sp);
	if (err)
		goto err_sub_pools_init;

	return 0;

err_sub_pools_init:
	bitmap_free(pool->usage);
err_usage_alloc:
	devlink_resource_occ_get_unregister(devlink,
					    MLXSW_SP_RESOURCE_COUNTERS);
err_pool_resource_size_get:
	kfree(pool);
	return err;
}

void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
	WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
		pool->pool_size);
	WARN_ON(atomic_read(&pool->active_entries_count));
	bitmap_free(pool->usage);
	devlink_resource_occ_get_unregister(devlink,
					    MLXSW_SP_RESOURCE_COUNTERS);
	kfree(pool);
}

int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
			   unsigned int *p_counter_index)
{
	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
	struct mlxsw_sp_counter_sub_pool *sub_pool;
	unsigned int entry_index;
	unsigned int stop_index;
	int i, err;

	sub_pool = &pool->sub_pools[sub_pool_id];
	stop_index = sub_pool->base_index + sub_pool->size;
	entry_index = sub_pool->base_index;

	spin_lock(&pool->counter_pool_lock);
	entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
	if (entry_index == stop_index) {
		err = -ENOBUFS;
		goto err_alloc;
	}
	/* The sub-pools can contain a non-integer number of entries,
	 * so we must check for overflow.
	 */
	if (entry_index + sub_pool->entry_size > stop_index) {
		err = -ENOBUFS;
		goto err_alloc;
	}
	for (i = 0; i < sub_pool->entry_size; i++)
		__set_bit(entry_index + i, pool->usage);
	spin_unlock(&pool->counter_pool_lock);

	*p_counter_index = entry_index;
	atomic_add(sub_pool->entry_size, &sub_pool->active_entries_count);
	atomic_add(sub_pool->entry_size, &pool->active_entries_count);
	return 0;

err_alloc:
	spin_unlock(&pool->counter_pool_lock);
	return err;
}

void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
			   unsigned int counter_index)
{
	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
	struct mlxsw_sp_counter_sub_pool *sub_pool;
	int i;

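	/* An index beyond the pool would clear bits past the end of the
	 * usage bitmap; warn and bail out instead.
	 */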
	if (WARN_ON(counter_index >= pool->pool_size))
		return;
	sub_pool = &pool->sub_pools[sub_pool_id];
	spin_lock(&pool->counter_pool_lock);
	for (i = 0; i < sub_pool->entry_size; i++)
		__clear_bit(counter_index + i, pool->usage);
	spin_unlock(&pool->counter_pool_lock);
	atomic_sub(sub_pool->entry_size, &sub_pool->active_entries_count);
	atomic_sub(sub_pool->entry_size, &pool->active_entries_count);
}

int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
{
	static struct devlink_resource_size_params size_params;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	const struct mlxsw_sp_counter_sub_pool *sub_pool;
	unsigned int total_bank_config;
	u64 sub_pool_size;
	u64 base_index;
	u64 pool_size;
	u64 bank_size;
	int err;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_POOL_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_BANK_SIZE))
		return -EIO;

	pool_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_POOL_SIZE);
	bank_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_BANK_SIZE);

	devlink_resource_size_params_init(&size_params, pool_size,
					  pool_size, bank_size,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	err = devlink_resource_register(devlink,
					MLXSW_SP_RESOURCE_NAME_COUNTERS,
					pool_size,
					MLXSW_SP_RESOURCE_COUNTERS,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&size_params);
	if (err)
		return err;

	/* Allocation is based on bank count, which should be
	 * specified for each sub-pool statically.
	 */
	total_bank_config = 0;
	base_index = 0;
	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
		sub_pool = &mlxsw_sp_counter_sub_pools[i];
		sub_pool_size = sub_pool->bank_count * bank_size;
		/* The last bank can't be fully used */
		if (base_index + sub_pool_size > pool_size)
			sub_pool_size = pool_size - base_index;
		base_index += sub_pool_size;

		devlink_resource_size_params_init(&size_params, sub_pool_size,
						  sub_pool_size, bank_size,
						  DEVLINK_RESOURCE_UNIT_ENTRY);
		err = devlink_resource_register(devlink,
						sub_pool->resource_name,
						sub_pool_size,
						sub_pool->resource_id,
						MLXSW_SP_RESOURCE_COUNTERS,
						&size_params);
		if (err)
			return err;
		total_bank_config += sub_pool->bank_count;
	}

	/* Check the config is valid: no bank over-subscription */
	if (WARN_ON(total_bank_config > div64_u64(pool_size, bank_size) + 1))
		return -EINVAL;

	return 0;
}
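
/* Illustrative usage sketch (not part of this file): a consumer such as the
 * flow or RIF counter code is assumed to allocate an index from its sub-pool
 * and release it when done, e.g.:
 *
 *	unsigned int counter_index;
 *	int err;
 *
 *	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 *				     &counter_index);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 *			      counter_index);
 */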