1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3 
4 #include <linux/refcount.h>
5 #include <linux/idr.h>
6 
7 #include "spectrum.h"
8 #include "reg.h"
9 
/* Port Group Table (PGT) bookkeeping. Indexes are allocated from pgt_idr in
 * the range [0, end_index); an index maps to NULL until its entry is created
 * on first port membership.
 */
struct mlxsw_sp_pgt {
	struct idr pgt_idr; /* PGT index -> struct mlxsw_sp_pgt_entry (or NULL). */
	u16 end_index; /* Exclusive. */
	struct mutex lock; /* Protects PGT. */
	/* Whether SMPE index should be programmed into SMID2. Only consulted
	 * when the unified bridge model ('ubridge') is used.
	 */
	bool smpe_index_valid;
};
16 
/* A single PGT entry: the set of local ports that are members of one MID. */
struct mlxsw_sp_pgt_entry {
	struct list_head ports_list; /* List of 'struct mlxsw_sp_pgt_entry_port'. */
	u16 index; /* PGT index this entry occupies in pgt_idr. */
	u16 smpe_index; /* SMPE index programmed to the device for this entry. */
};
22 
/* One port's membership in a PGT entry. */
struct mlxsw_sp_pgt_entry_port {
	struct list_head list; /* Member of 'ports_list'. */
	u16 local_port;
};
27 
28 int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid)
29 {
30 	int index, err = 0;
31 
32 	mutex_lock(&mlxsw_sp->pgt->lock);
33 	index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,
34 			  mlxsw_sp->pgt->end_index, GFP_KERNEL);
35 
36 	if (index < 0) {
37 		err = index;
38 		goto err_idr_alloc;
39 	}
40 
41 	*p_mid = index;
42 	mutex_unlock(&mlxsw_sp->pgt->lock);
43 	return 0;
44 
45 err_idr_alloc:
46 	mutex_unlock(&mlxsw_sp->pgt->lock);
47 	return err;
48 }
49 
50 void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
51 {
52 	mutex_lock(&mlxsw_sp->pgt->lock);
53 	WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));
54 	mutex_unlock(&mlxsw_sp->pgt->lock);
55 }
56 
57 int
58 mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
59 {
60 	unsigned int idr_cursor;
61 	int i, err;
62 
63 	mutex_lock(&mlxsw_sp->pgt->lock);
64 
65 	/* This function is supposed to be called several times as part of
66 	 * driver init, in specific order. Verify that the mid_index is the
67 	 * first free index in the idr, to be able to free the indexes in case
68 	 * of error.
69 	 */
70 	idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
71 	if (WARN_ON(idr_cursor != mid_base)) {
72 		err = -EINVAL;
73 		goto err_idr_cursor;
74 	}
75 
76 	for (i = 0; i < count; i++) {
77 		err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL,
78 				       mid_base, mid_base + count, GFP_KERNEL);
79 		if (err < 0)
80 			goto err_idr_alloc_cyclic;
81 	}
82 
83 	mutex_unlock(&mlxsw_sp->pgt->lock);
84 	return 0;
85 
86 err_idr_alloc_cyclic:
87 	for (i--; i >= 0; i--)
88 		idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i);
89 err_idr_cursor:
90 	mutex_unlock(&mlxsw_sp->pgt->lock);
91 	return err;
92 }
93 
94 void
95 mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
96 {
97 	struct idr *pgt_idr = &mlxsw_sp->pgt->pgt_idr;
98 	int i;
99 
100 	mutex_lock(&mlxsw_sp->pgt->lock);
101 
102 	for (i = 0; i < count; i++)
103 		WARN_ON_ONCE(idr_remove(pgt_idr, mid_base + i));
104 
105 	mutex_unlock(&mlxsw_sp->pgt->lock);
106 }
107 
108 static struct mlxsw_sp_pgt_entry_port *
109 mlxsw_sp_pgt_entry_port_lookup(struct mlxsw_sp_pgt_entry *pgt_entry,
110 			       u16 local_port)
111 {
112 	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
113 
114 	list_for_each_entry(pgt_entry_port, &pgt_entry->ports_list, list) {
115 		if (pgt_entry_port->local_port == local_port)
116 			return pgt_entry_port;
117 	}
118 
119 	return NULL;
120 }
121 
122 static struct mlxsw_sp_pgt_entry *
123 mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
124 {
125 	struct mlxsw_sp_pgt_entry *pgt_entry;
126 	void *ret;
127 	int err;
128 
129 	pgt_entry = kzalloc(sizeof(*pgt_entry), GFP_KERNEL);
130 	if (!pgt_entry)
131 		return ERR_PTR(-ENOMEM);
132 
133 	ret = idr_replace(&pgt->pgt_idr, pgt_entry, mid);
134 	if (IS_ERR(ret)) {
135 		err = PTR_ERR(ret);
136 		goto err_idr_replace;
137 	}
138 
139 	INIT_LIST_HEAD(&pgt_entry->ports_list);
140 	pgt_entry->index = mid;
141 	pgt_entry->smpe_index = smpe;
142 	return pgt_entry;
143 
144 err_idr_replace:
145 	kfree(pgt_entry);
146 	return ERR_PTR(err);
147 }
148 
149 static void mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt,
150 				       struct mlxsw_sp_pgt_entry *pgt_entry)
151 {
152 	WARN_ON(!list_empty(&pgt_entry->ports_list));
153 
154 	pgt_entry = idr_replace(&pgt->pgt_idr, NULL, pgt_entry->index);
155 	if (WARN_ON(IS_ERR(pgt_entry)))
156 		return;
157 
158 	kfree(pgt_entry);
159 }
160 
161 static struct mlxsw_sp_pgt_entry *
162 mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
163 {
164 	struct mlxsw_sp_pgt_entry *pgt_entry;
165 
166 	pgt_entry = idr_find(&pgt->pgt_idr, mid);
167 	if (pgt_entry)
168 		return pgt_entry;
169 
170 	return mlxsw_sp_pgt_entry_create(pgt, mid, smpe);
171 }
172 
173 static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
174 {
175 	struct mlxsw_sp_pgt_entry *pgt_entry;
176 
177 	pgt_entry = idr_find(&pgt->pgt_idr, mid);
178 	if (WARN_ON(!pgt_entry))
179 		return;
180 
181 	if (list_empty(&pgt_entry->ports_list))
182 		mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
183 }
184 
185 #define MLXSW_SP_FID_PGT_FLOOD_ENTRIES	15354 /* Reserved for flooding. */
186 
187 u16 mlxsw_sp_pgt_index_to_mid(const struct mlxsw_sp *mlxsw_sp, u16 pgt_index)
188 {
189 	if (mlxsw_sp->ubridge)
190 		return pgt_index;
191 
192 	return pgt_index - MLXSW_SP_FID_PGT_FLOOD_ENTRIES;
193 }
194 
/* Mark 'local_port' membership in the SMID2 payload. The mask bit is set so
 * only this port's bit is considered valid by the device - other ports'
 * membership is left untouched by the write.
 */
static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
					bool member)
{
	mlxsw_reg_smid2_port_set(smid2_pl, local_port, member);
	mlxsw_reg_smid2_port_mask_set(smid2_pl, local_port, 1);
}
201 
202 static int
203 mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
204 			      const struct mlxsw_sp_pgt_entry *pgt_entry,
205 			      u16 local_port, bool member)
206 {
207 	bool smpe_index_valid;
208 	char *smid2_pl;
209 	u16 smpe, mid;
210 	int err;
211 
212 	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
213 	if (!smid2_pl)
214 		return -ENOMEM;
215 
216 	smpe_index_valid = mlxsw_sp->ubridge ? mlxsw_sp->pgt->smpe_index_valid :
217 			   false;
218 	smpe = mlxsw_sp->ubridge ? pgt_entry->smpe_index : 0;
219 	mid = mlxsw_sp_pgt_index_to_mid(mlxsw_sp, pgt_entry->index);
220 
221 	mlxsw_reg_smid2_pack(smid2_pl, mid, 0, 0, smpe_index_valid, smpe);
222 
223 	mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
224 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
225 
226 	kfree(smid2_pl);
227 
228 	return err;
229 }
230 
231 static struct mlxsw_sp_pgt_entry_port *
232 mlxsw_sp_pgt_entry_port_create(struct mlxsw_sp *mlxsw_sp,
233 			       struct mlxsw_sp_pgt_entry *pgt_entry,
234 			       u16 local_port)
235 {
236 	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
237 	int err;
238 
239 	pgt_entry_port = kzalloc(sizeof(*pgt_entry_port), GFP_KERNEL);
240 	if (!pgt_entry_port)
241 		return ERR_PTR(-ENOMEM);
242 
243 	err = mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry, local_port,
244 					    true);
245 	if (err)
246 		goto err_pgt_entry_port_write;
247 
248 	pgt_entry_port->local_port = local_port;
249 	list_add(&pgt_entry_port->list, &pgt_entry->ports_list);
250 
251 	return pgt_entry_port;
252 
253 err_pgt_entry_port_write:
254 	kfree(pgt_entry_port);
255 	return ERR_PTR(err);
256 }
257 
/* Remove 'local_port' from 'pgt_entry' and free its membership record.
 * The device write's return value is deliberately ignored: this is
 * best-effort teardown and the software state is removed regardless.
 */
static void
mlxsw_sp_pgt_entry_port_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_pgt_entry *pgt_entry,
				struct mlxsw_sp_pgt_entry_port *pgt_entry_port)

{
	list_del(&pgt_entry_port->list);
	mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry,
				      pgt_entry_port->local_port, false);
	kfree(pgt_entry_port);
}
269 
270 static int mlxsw_sp_pgt_entry_port_add(struct mlxsw_sp *mlxsw_sp, u16 mid,
271 				       u16 smpe, u16 local_port)
272 {
273 	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
274 	struct mlxsw_sp_pgt_entry *pgt_entry;
275 	int err;
276 
277 	mutex_lock(&mlxsw_sp->pgt->lock);
278 
279 	pgt_entry = mlxsw_sp_pgt_entry_get(mlxsw_sp->pgt, mid, smpe);
280 	if (IS_ERR(pgt_entry)) {
281 		err = PTR_ERR(pgt_entry);
282 		goto err_pgt_entry_get;
283 	}
284 
285 	pgt_entry_port = mlxsw_sp_pgt_entry_port_create(mlxsw_sp, pgt_entry,
286 							local_port);
287 	if (IS_ERR(pgt_entry_port)) {
288 		err = PTR_ERR(pgt_entry_port);
289 		goto err_pgt_entry_port_get;
290 	}
291 
292 	mutex_unlock(&mlxsw_sp->pgt->lock);
293 	return 0;
294 
295 err_pgt_entry_port_get:
296 	mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
297 err_pgt_entry_get:
298 	mutex_unlock(&mlxsw_sp->pgt->lock);
299 	return err;
300 }
301 
302 static void mlxsw_sp_pgt_entry_port_del(struct mlxsw_sp *mlxsw_sp,
303 					u16 mid, u16 smpe, u16 local_port)
304 {
305 	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
306 	struct mlxsw_sp_pgt_entry *pgt_entry;
307 
308 	mutex_lock(&mlxsw_sp->pgt->lock);
309 
310 	pgt_entry = idr_find(&mlxsw_sp->pgt->pgt_idr, mid);
311 	if (!pgt_entry)
312 		goto out;
313 
314 	pgt_entry_port = mlxsw_sp_pgt_entry_port_lookup(pgt_entry, local_port);
315 	if (!pgt_entry_port)
316 		goto out;
317 
318 	mlxsw_sp_pgt_entry_port_destroy(mlxsw_sp, pgt_entry, pgt_entry_port);
319 	mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
320 
321 out:
322 	mutex_unlock(&mlxsw_sp->pgt->lock);
323 }
324 
325 int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
326 				u16 smpe, u16 local_port, bool member)
327 {
328 	if (member)
329 		return mlxsw_sp_pgt_entry_port_add(mlxsw_sp, mid, smpe,
330 						   local_port);
331 
332 	mlxsw_sp_pgt_entry_port_del(mlxsw_sp, mid, smpe, local_port);
333 	return 0;
334 }
335 
336 int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp)
337 {
338 	struct mlxsw_sp_pgt *pgt;
339 
340 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, PGT_SIZE))
341 		return -EIO;
342 
343 	pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL);
344 	if (!pgt)
345 		return -ENOMEM;
346 
347 	idr_init(&pgt->pgt_idr);
348 	pgt->end_index = MLXSW_CORE_RES_GET(mlxsw_sp->core, PGT_SIZE);
349 	mutex_init(&pgt->lock);
350 	pgt->smpe_index_valid = mlxsw_sp->pgt_smpe_index_valid;
351 	mlxsw_sp->pgt = pgt;
352 	return 0;
353 }
354 
355 void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp)
356 {
357 	mutex_destroy(&mlxsw_sp->pgt->lock);
358 	WARN_ON(!idr_is_empty(&mlxsw_sp->pgt->pgt_idr));
359 	idr_destroy(&mlxsw_sp->pgt->pgt_idr);
360 	kfree(mlxsw_sp->pgt);
361 }
362