// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"
#include "rxe_loc.h"

/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
		.elem_offset	= offsetof(struct rxe_ucontext, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
		.elem_offset	= offsetof(struct rxe_pd, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.elem_offset	= offsetof(struct rxe_ah, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.elem_offset	= offsetof(struct rxe_srq, pelem),
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.elem_offset	= offsetof(struct rxe_qp, pelem),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.elem_offset	= offsetof(struct rxe_cq, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.elem_offset	= offsetof(struct rxe_mem, pelem),
		.cleanup	= rxe_mem_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.elem_offset	= offsetof(struct rxe_mem, pelem),
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.elem_offset	= offsetof(struct rxe_mc_grp, pelem),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.elem_offset	= offsetof(struct rxe_mc_elem, pelem),
	},
};

static inline const char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}
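
/* set up the index bitmap and bounds for an indexed pool;
 * fails if the [min, max] range cannot cover max_elem indices
 */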
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->index.max_index = max;
	pool->index.min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->index.table = kmalloc(size, GFP_KERNEL);
	if (!pool->index.table) {
		err = -ENOMEM;
		goto out;
	}

	pool->index.table_size = size;
	bitmap_zero(pool->index.table, max - min + 1);

out:
	return err;
}
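
/* initialize a pool of objects of the given type; copies the per-type
 * parameters from rxe_type_info and, for indexed pools, allocates the
 * index bitmap
 */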
int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned int		max_elem)
{
	int			err = 0;
	size_t			size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe		= rxe;
	pool->type		= type;
	pool->max_elem		= max_elem;
	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
	pool->flags		= rxe_type_info[type].flags;
	pool->index.tree	= RB_ROOT;
	pool->key.tree		= RB_ROOT;
	pool->cleanup		= rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	rwlock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key.key_offset = rxe_type_info[type].key_offset;
		pool->key.key_size = rxe_type_info[type].key_size;
	}

out:
	return err;
}
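
/* tear down a pool; warns if elements are still outstanding */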
void rxe_pool_cleanup(struct rxe_pool *pool)
{
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));

	kfree(pool->index.table);
}
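
/* find a free index in the pool's bitmap, starting the search just
 * after the last index handed out so indices are not reused right
 * away; caller must hold the pool lock
 */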
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->index.max_index - pool->index.min_index + 1;

	index = find_next_zero_bit(pool->index.table, range, pool->index.last);
	if (index >= range)
		index = find_first_zero_bit(pool->index.table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->index.table);
	pool->index.last = index;
	return index + pool->index.min_index;
}
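
/* insert an element into the pool's index-ordered red-black tree;
 * caller must hold the pool lock
 */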
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->index.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, index_node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			goto out;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->index_node, parent, link);
	rb_insert_color(&new->index_node, &pool->index.tree);
out:
	return;
}
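
/* insert an element into the pool's key-ordered red-black tree,
 * comparing keys with memcmp(); caller must hold the pool lock
 */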
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->key.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     (u8 *)new + pool->key.key_offset, pool->key.key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			goto out;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->key_node, parent, link);
	rb_insert_color(&new->key_node, &pool->key.tree);
out:
	return;
}
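
/* copy the caller's key into the element and add it to the key tree;
 * the _locked variant expects the pool lock to be held already,
 * the other takes it itself
 */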
void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;

	memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
	insert_key(pool, elem);
}

void __rxe_add_key(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_add_key_locked(elem, key);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	rb_erase(&elem->key_node, &pool->key.tree);
}

void __rxe_drop_key(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_key_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}
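
/* allocate an index for the element and add it to the index tree, or
 * drop it again; the _locked variants expect the pool lock to be held
 * already, the others take it themselves
 */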
void __rxe_add_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	elem->index = alloc_index(pool);
	insert_index(pool, elem);
}

void __rxe_add_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_add_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	clear_bit(elem->index - pool->index.min_index, pool->index.table);
	rb_erase(&elem->index_node, &pool->index.tree);
}

void __rxe_drop_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}
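
/* allocate a pool object in atomic context (GFP_ATOMIC); returns the
 * containing object, or NULL if the pool is full or memory is
 * exhausted
 */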
void *rxe_alloc_locked(struct rxe_pool *pool)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(info->size, GFP_ATOMIC);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}
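
/* same as rxe_alloc_locked() except it may sleep (GFP_KERNEL) */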
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(info->size, GFP_KERNEL);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}
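
/* add an element embedded in an object allocated outside the pool
 * (RXE_POOL_NO_ALLOC types); only the element count, pool back
 * pointer and reference count are initialized here
 */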
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return 0;

out_cnt:
	atomic_dec(&pool->num_elem);
	return -EINVAL;
}
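
/* kref release callback: run the type-specific cleanup, free the
 * object if the pool owns its memory and drop the element count
 */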
void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	u8 *obj;

	if (pool->cleanup)
		pool->cleanup(elem);

	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
		obj = (u8 *)elem - info->elem_offset;
		kfree(obj);
	}

	atomic_dec(&pool->num_elem);
}
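
/* look up an object by index in the pool's red-black tree and take a
 * reference on it; returns NULL if no element has that index
 */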
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj;

	node = pool->index.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, index_node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	u8 *obj;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_index_locked(pool, index);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}
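
/* look up an object by key in the pool's red-black tree and take a
 * reference on it; returns NULL if no element matches the key
 */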
void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj;
	int cmp;

	node = pool->key.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     key, pool->key.key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	u8 *obj;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_key_locked(pool, key);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}