/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	   Redistribution and use in source and binary forms, with or
 *	   without modification, are permitted provided that the following
 *	   conditions are met:
 *
 *		- Redistributions of source code must retain the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer.
 *
 *		- Redistributions in binary form must reproduce the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer in the documentation and/or other materials
 *		  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"

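/* Rough usage sketch (illustrative only; the verbs layer embeds a
 * struct rxe_pool_entry in each real object type, e.g. struct rxe_qp):
 *
 *	qp = rxe_alloc(&rxe->qp_pool);	// takes refs on pool and device
 *	if (!qp)
 *		return NULL;
 *	rxe_add_index(qp);		// indexed pools only
 *	...
 *	rxe_drop_index(qp);
 *	rxe_drop_ref(qp);		// last put ends in rxe_elem_release()
 */
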
/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.flags		= RXE_POOL_ATOMIC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.cleanup	= rxe_mem_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.flags		= RXE_POOL_ATOMIC,
	},
};

static inline char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}

static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].cache;
}

static inline enum rxe_elem_type rxe_type(void *arg)
{
	struct rxe_pool_entry *elem = arg;

	return elem->pool->type;
}

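/* create a slab cache for each object type */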
int rxe_cache_init(void)
{
	int err;
	int i;
	size_t size;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		size = ALIGN(type->size, RXE_POOL_ALIGN);
		type->cache = kmem_cache_create(type->name, size,
				RXE_POOL_ALIGN,
				RXE_POOL_CACHE_FLAGS, NULL);
		if (!type->cache) {
			pr_err("Unable to init kmem cache for %s\n",
			       type->name);
			err = -ENOMEM;
			goto err1;
		}
	}

	return 0;

err1:
	while (--i >= 0) {
		/* re-fetch the entry so the caches created before the
		 * failure are actually destroyed
		 */
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}

	return err;
}

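/* destroy the per-type slab caches created by rxe_cache_init() */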
void rxe_cache_exit(void)
{
	int i;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}
}

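/* set up the free-index bitmap for an indexed pool */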
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->max_index = max;
	pool->min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->table = kmalloc(size, GFP_KERNEL);
	if (!pool->table) {
		pr_warn("no memory for bit table\n");
		err = -ENOMEM;
		goto out;
	}

	pool->table_size = size;
	bitmap_zero(pool->table, max - min + 1);

out:
	return err;
}

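/* initialize a pool of objects of the given type, capped at max_elem */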
int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned		max_elem)
{
	int			err = 0;
	size_t			size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe		= rxe;
	pool->type		= type;
	pool->max_elem		= max_elem;
	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
	pool->flags		= rxe_type_info[type].flags;
	pool->tree		= RB_ROOT;
	pool->cleanup		= rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	kref_init(&pool->ref_cnt);

	spin_lock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key_offset = rxe_type_info[type].key_offset;
		pool->key_size = rxe_type_info[type].key_size;
	}

	pool->state = rxe_pool_valid;

out:
	return err;
}

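/* last reference to the pool is gone; mark it invalid and free the
 * index bitmap
 */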
static void rxe_pool_release(struct kref *kref)
{
	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);

	pool->state = rxe_pool_invalid;
	kfree(pool->table);
}

static void rxe_pool_put(struct rxe_pool *pool)
{
	kref_put(&pool->ref_cnt, rxe_pool_release);
}

int rxe_pool_cleanup(struct rxe_pool *pool)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	pool->state = rxe_pool_invalid;
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	rxe_pool_put(pool);

	return 0;
}

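/* find a free index, searching from the last one handed out;
 * caller must hold pool_lock
 */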
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->max_index - pool->min_index + 1;

	index = find_next_zero_bit(pool->table, range, pool->last);
	if (index >= range)
		index = find_first_zero_bit(pool->table, range);

	set_bit(index, pool->table);
	pool->last = index;
	return index + pool->min_index;
}

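/* insert an element into the pool's rb-tree ordered by index;
 * caller must hold pool_lock
 */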
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			goto out;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}

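/* insert an element into the pool's rb-tree ordered by key;
 * caller must hold pool_lock
 */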
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     (u8 *)new + pool->key_offset, pool->key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			goto out;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}

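/* copy the key into the element and add it to the pool's tree */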
void rxe_add_key(void *arg, void *key)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
	insert_key(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_key(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

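/* assign a free index to the element and add it to the pool's tree */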
void rxe_add_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	elem->index = alloc_index(pool);
	insert_index(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	clear_bit(elem->index - pool->min_index, pool->table);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

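/* allocate a zeroed object from the pool's slab cache; returns NULL if
 * the pool is invalid, already at max_elem, or the allocation fails
 */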
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_pool_entry *elem;
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

	spin_lock_irqsave(&pool->pool_lock, flags);
	if (pool->state != rxe_pool_valid) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return NULL;
	}
	kref_get(&pool->ref_cnt);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	kref_get(&pool->rxe->ref_cnt);

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem) {
		atomic_dec(&pool->num_elem);
		rxe_dev_put(pool->rxe);
		rxe_pool_put(pool);
		return NULL;
	}

	elem = kmem_cache_zalloc(pool_cache(pool),
				 (pool->flags & RXE_POOL_ATOMIC) ?
				 GFP_ATOMIC : GFP_KERNEL);
	if (!elem) {
		/* undo the counts and references taken above */
		atomic_dec(&pool->num_elem);
		rxe_dev_put(pool->rxe);
		rxe_pool_put(pool);
		return NULL;
	}

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return elem;
}

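/* kref release callback; runs the type-specific cleanup, frees the object
 * and drops the references taken in rxe_alloc()
 */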
void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;

	if (pool->cleanup)
		pool->cleanup(elem);

	kmem_cache_free(pool_cache(pool), elem);
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
}

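/* look up an object by index; takes a reference on it if found */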
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? (void *)elem : NULL;
}

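/* look up an object by key; takes a reference on it if found */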
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	int cmp;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     key, pool->key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? (void *)elem : NULL;
}