// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include <linux/refcount.h>
#include <linux/idr.h>

#include "spectrum.h"
#include "reg.h"

/* Driver-wide PGT state. MID indexes are managed by an IDR: a reserved MID
 * holds a NULL pointer until an entry structure is installed for it
 * (see mlxsw_sp_pgt_entry_create()). All access is serialized by 'lock'.
 */
struct mlxsw_sp_pgt {
	struct idr pgt_idr;
	u16 end_index; /* Exclusive. */
	struct mutex lock; /* Protects PGT. */
	/* Whether the SMPE index should be marked valid when packing the
	 * SMID2 register. NOTE(review): presumably hardware-generation
	 * dependent — set from mlxsw_sp->pgt_smpe_index_valid at init.
	 */
	bool smpe_index_valid;
};

/* One PGT entry: a MID together with the list of its member local ports. */
struct mlxsw_sp_pgt_entry {
	struct list_head ports_list;
	u16 index; /* MID this entry is installed under in the IDR. */
	u16 smpe_index;
};

/* Membership node linking one local port into a PGT entry. */
struct mlxsw_sp_pgt_entry_port {
	struct list_head list; /* Member of 'ports_list'. */
	u16 local_port;
};

/* Reserve the lowest free MID in [0, end_index) and return it via 'p_mid'.
 * The IDR slot is reserved with a NULL pointer; an entry structure is only
 * installed later, when the first port is added. Returns 0 or -errno.
 */
int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid)
{
	int index, err = 0;

	mutex_lock(&mlxsw_sp->pgt->lock);
	index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,
			  mlxsw_sp->pgt->end_index, GFP_KERNEL);

	if (index < 0) {
		err = index;
		goto err_idr_alloc;
	}

	*p_mid = index;
	mutex_unlock(&mlxsw_sp->pgt->lock);
	return 0;

err_idr_alloc:
	mutex_unlock(&mlxsw_sp->pgt->lock);
	return err;
}

/* Release a MID previously reserved by mlxsw_sp_pgt_mid_alloc(). The WARN_ON
 * fires if idr_remove() returns a non-NULL pointer, i.e. an entry structure
 * was still installed for the MID (ports were not removed first).
 */
void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
{
	mutex_lock(&mlxsw_sp->pgt->lock);
	WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));
	mutex_unlock(&mlxsw_sp->pgt->lock);
}

/* Reserve 'count' consecutive MIDs starting at exactly 'mid_base'.
 * Returns 0 or -errno; on failure any MIDs reserved so far are released.
 */
int
mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
{
	unsigned int idr_cursor;
	int i, err;

	mutex_lock(&mlxsw_sp->pgt->lock);

	/* This function is supposed to be called several times as part of
	 * driver init, in specific order. Verify that the mid_index is the
	 * first free index in the idr, to be able to free the indexes in case
	 * of error.
	 */
	idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
	if (WARN_ON(idr_cursor != mid_base)) {
		err = -EINVAL;
		goto err_idr_cursor;
	}

	/* idr_alloc_cyclic() starts at the cursor, so given the check above
	 * each iteration reserves mid_base, mid_base + 1, ... in order.
	 */
	for (i = 0; i < count; i++) {
		err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL,
				       mid_base, mid_base + count, GFP_KERNEL);
		if (err < 0)
			goto err_idr_alloc_cyclic;
	}

	mutex_unlock(&mlxsw_sp->pgt->lock);
	return 0;

err_idr_alloc_cyclic:
	/* Unwind only the indexes reserved by this call. */
	for (i--; i >= 0; i--)
		idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i);
err_idr_cursor:
	mutex_unlock(&mlxsw_sp->pgt->lock);
	return err;
}

/* Release a range previously reserved by mlxsw_sp_pgt_mid_alloc_range().
 * WARN_ON_ONCE fires if any MID in the range still had an entry installed.
 */
void
mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
{
	struct idr *pgt_idr = &mlxsw_sp->pgt->pgt_idr;
	int i;

	mutex_lock(&mlxsw_sp->pgt->lock);

	for (i = 0; i < count; i++)
		WARN_ON_ONCE(idr_remove(pgt_idr, mid_base + i));

	mutex_unlock(&mlxsw_sp->pgt->lock);
}

/* Linear search for 'local_port' in the entry's port list; NULL if absent.
 * Caller must hold pgt->lock.
 */
static struct mlxsw_sp_pgt_entry_port *
mlxsw_sp_pgt_entry_port_lookup(struct mlxsw_sp_pgt_entry *pgt_entry,
			       u16 local_port)
{
	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;

	list_for_each_entry(pgt_entry_port, &pgt_entry->ports_list, list) {
		if (pgt_entry_port->local_port == local_port)
			return pgt_entry_port;
	}

	return NULL;
}

/* Allocate an entry for an already-reserved MID and install it in the IDR,
 * replacing the NULL placeholder. idr_replace() returns an ERR_PTR if 'mid'
 * was never reserved. Caller must hold pgt->lock.
 */
static struct mlxsw_sp_pgt_entry *
mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
{
	struct mlxsw_sp_pgt_entry *pgt_entry;
	void *ret;
	int err;

	pgt_entry = kzalloc(sizeof(*pgt_entry), GFP_KERNEL);
	if (!pgt_entry)
		return ERR_PTR(-ENOMEM);

	ret = idr_replace(&pgt->pgt_idr, pgt_entry, mid);
	if (IS_ERR(ret)) {
		err = PTR_ERR(ret);
		goto err_idr_replace;
	}

	INIT_LIST_HEAD(&pgt_entry->ports_list);
	pgt_entry->index = mid;
	pgt_entry->smpe_index = smpe;
	return pgt_entry;

err_idr_replace:
	kfree(pgt_entry);
	return ERR_PTR(err);
}

/* Put the NULL placeholder back for the entry's MID (keeping the MID
 * reserved) and free the entry. Must only be called once the port list is
 * empty. Caller must hold pgt->lock.
 */
static void
mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt,
			   struct mlxsw_sp_pgt_entry *pgt_entry)
{
	WARN_ON(!list_empty(&pgt_entry->ports_list));

	pgt_entry = idr_replace(&pgt->pgt_idr, NULL, pgt_entry->index);
	if (WARN_ON(IS_ERR(pgt_entry)))
		return;

	kfree(pgt_entry);
}

/* Find the entry installed for 'mid', or create one if the MID currently
 * holds only the NULL placeholder. Caller must hold pgt->lock.
 */
static struct mlxsw_sp_pgt_entry *
mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
{
	struct mlxsw_sp_pgt_entry *pgt_entry;

	pgt_entry = idr_find(&pgt->pgt_idr, mid);
	if (pgt_entry)
		return pgt_entry;

	return mlxsw_sp_pgt_entry_create(pgt, mid, smpe);
}

/* Drop the entry for 'mid' if it no longer has member ports. The entry is
 * expected to exist (WARN otherwise). Caller must hold pgt->lock.
 */
static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
{
	struct mlxsw_sp_pgt_entry *pgt_entry;

	pgt_entry = idr_find(&pgt->pgt_idr, mid);
	if (WARN_ON(!pgt_entry))
		return;

	if (list_empty(&pgt_entry->ports_list))
		mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
}

/* Set one port's membership bit and its corresponding mask bit in the SMID2
 * register payload, so the write affects only this port.
 */
static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
					bool member)
{
	mlxsw_reg_smid2_port_set(smid2_pl, local_port, member);
	mlxsw_reg_smid2_port_mask_set(smid2_pl, local_port, 1);
}

/* Issue a SMID2 register write that adds ('member' = true) or removes one
 * local port from the entry's MID in hardware. The payload is heap-allocated
 * because register payloads are large. Returns 0 or -errno.
 */
static int
mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
			      const struct mlxsw_sp_pgt_entry *pgt_entry,
			      u16 local_port, bool member)
{
	char *smid2_pl;
	int err;

	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
	if (!smid2_pl)
		return -ENOMEM;

	mlxsw_reg_smid2_pack(smid2_pl, pgt_entry->index, 0, 0,
			     mlxsw_sp->pgt->smpe_index_valid,
			     pgt_entry->smpe_index);

	mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);

	kfree(smid2_pl);

	return err;
}

/* Add a port to a PGT entry: write the membership to hardware first, then
 * link the port into the entry's list. Caller must hold pgt->lock.
 */
static struct mlxsw_sp_pgt_entry_port *
mlxsw_sp_pgt_entry_port_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_pgt_entry *pgt_entry,
			       u16 local_port)
{
	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
	int err;

	pgt_entry_port = kzalloc(sizeof(*pgt_entry_port), GFP_KERNEL);
	if (!pgt_entry_port)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry, local_port,
					    true);
	if (err)
		goto err_pgt_entry_port_write;

	pgt_entry_port->local_port = local_port;
	list_add(&pgt_entry_port->list, &pgt_entry->ports_list);

	return pgt_entry_port;

err_pgt_entry_port_write:
	kfree(pgt_entry_port);
	return ERR_PTR(err);
}

/* Remove a port from a PGT entry. The hardware write's return value is
 * deliberately ignored: removal is best-effort on this teardown path.
 * Caller must hold pgt->lock.
 */
static void
mlxsw_sp_pgt_entry_port_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_pgt_entry *pgt_entry,
				struct mlxsw_sp_pgt_entry_port *pgt_entry_port)

{
	list_del(&pgt_entry_port->list);
	mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry,
				      pgt_entry_port->local_port, false);
	kfree(pgt_entry_port);
}

/* Locked add path: get-or-create the entry for 'mid', then add the port.
 * On port-creation failure the entry is dropped again if it has no other
 * ports (entry_put only destroys empty entries). Returns 0 or -errno.
 */
static int mlxsw_sp_pgt_entry_port_add(struct mlxsw_sp *mlxsw_sp, u16 mid,
				       u16 smpe, u16 local_port)
{
	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
	struct mlxsw_sp_pgt_entry *pgt_entry;
	int err;

	mutex_lock(&mlxsw_sp->pgt->lock);

	pgt_entry = mlxsw_sp_pgt_entry_get(mlxsw_sp->pgt, mid, smpe);
	if (IS_ERR(pgt_entry)) {
		err = PTR_ERR(pgt_entry);
		goto err_pgt_entry_get;
	}

	pgt_entry_port = mlxsw_sp_pgt_entry_port_create(mlxsw_sp, pgt_entry,
							local_port);
	if (IS_ERR(pgt_entry_port)) {
		err = PTR_ERR(pgt_entry_port);
		goto err_pgt_entry_port_get;
	}

	mutex_unlock(&mlxsw_sp->pgt->lock);
	return 0;

err_pgt_entry_port_get:
	mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
err_pgt_entry_get:
	mutex_unlock(&mlxsw_sp->pgt->lock);
	return err;
}

/* Locked del path: silently tolerates a missing entry or port, destroys the
 * membership otherwise, and releases the entry once its last port is gone.
 * 'smpe' is unused here; kept for interface symmetry with the add path.
 */
static void mlxsw_sp_pgt_entry_port_del(struct mlxsw_sp *mlxsw_sp,
					u16 mid, u16 smpe, u16 local_port)
{
	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
	struct mlxsw_sp_pgt_entry *pgt_entry;

	mutex_lock(&mlxsw_sp->pgt->lock);

	pgt_entry = idr_find(&mlxsw_sp->pgt->pgt_idr, mid);
	if (!pgt_entry)
		goto out;

	pgt_entry_port = mlxsw_sp_pgt_entry_port_lookup(pgt_entry, local_port);
	if (!pgt_entry_port)
		goto out;

	mlxsw_sp_pgt_entry_port_destroy(mlxsw_sp, pgt_entry, pgt_entry_port);
	mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);

out:
	mutex_unlock(&mlxsw_sp->pgt->lock);
}

/* Public API: set or clear a port's membership in 'mid'. Note the delete
 * path cannot fail, hence the asymmetric return handling.
 */
int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
				u16 smpe, u16 local_port, bool member)
{
	if (member)
		return mlxsw_sp_pgt_entry_port_add(mlxsw_sp, mid, smpe,
						   local_port);

	mlxsw_sp_pgt_entry_port_del(mlxsw_sp, mid, smpe, local_port);
	return 0;
}

/* Allocate and initialize the PGT state; table size comes from the device's
 * PGT_SIZE resource. Returns 0, -EIO if the resource is absent, or -ENOMEM.
 */
int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_pgt *pgt;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, PGT_SIZE))
		return -EIO;

	pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL);
	if (!pgt)
		return -ENOMEM;

	idr_init(&pgt->pgt_idr);
	pgt->end_index = MLXSW_CORE_RES_GET(mlxsw_sp->core, PGT_SIZE);
	mutex_init(&pgt->lock);
	pgt->smpe_index_valid = mlxsw_sp->pgt_smpe_index_valid;
	mlxsw_sp->pgt = pgt;
	return 0;
}

/* Tear down the PGT state. WARNs if any MID is still reserved, since all
 * users should have freed their MIDs by now.
 */
void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->pgt->lock);
	WARN_ON(!idr_is_empty(&mlxsw_sp->pgt->pgt_idr));
	idr_destroy(&mlxsw_sp->pgt->pgt_idr);
	kfree(mlxsw_sp->pgt);
}