// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"
#include "rxe_loc.h"

/* info about object pools */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name = "rxe-uc",
		.size = sizeof(struct rxe_ucontext),
		.elem_offset = offsetof(struct rxe_ucontext, pelem),
		.flags = RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_PD] = {
		.name = "rxe-pd",
		.size = sizeof(struct rxe_pd),
		.elem_offset = offsetof(struct rxe_pd, pelem),
		.flags = RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_AH] = {
		.name = "rxe-ah",
		.size = sizeof(struct rxe_ah),
		.elem_offset = offsetof(struct rxe_ah, pelem),
		.flags = RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_SRQ] = {
		.name = "rxe-srq",
		.size = sizeof(struct rxe_srq),
		.elem_offset = offsetof(struct rxe_srq, pelem),
		.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index = RXE_MIN_SRQ_INDEX,
		.max_index = RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name = "rxe-qp",
		.size = sizeof(struct rxe_qp),
		.elem_offset = offsetof(struct rxe_qp, pelem),
		.cleanup = rxe_qp_cleanup,
		.flags = RXE_POOL_INDEX,
		.min_index = RXE_MIN_QP_INDEX,
		.max_index = RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name = "rxe-cq",
		.size = sizeof(struct rxe_cq),
		.elem_offset = offsetof(struct rxe_cq, pelem),
		.flags = RXE_POOL_NO_ALLOC,
		.cleanup = rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name = "rxe-mr",
		.size = sizeof(struct rxe_mr),
		.elem_offset = offsetof(struct rxe_mr, pelem),
		.cleanup = rxe_mr_cleanup,
		.flags = RXE_POOL_INDEX,
		.max_index = RXE_MAX_MR_INDEX,
		.min_index = RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name = "rxe-mw",
		.size = sizeof(struct rxe_mw),
		.elem_offset = offsetof(struct rxe_mw, pelem),
		.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.max_index = RXE_MAX_MW_INDEX,
		.min_index = RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name = "rxe-mc_grp",
		.size = sizeof(struct rxe_mc_grp),
		.elem_offset = offsetof(struct rxe_mc_grp, pelem),
		.cleanup = rxe_mc_cleanup,
		.flags = RXE_POOL_KEY,
		.key_offset = offsetof(struct rxe_mc_grp, mgid),
		.key_size = sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name = "rxe-mc_elem",
		.size = sizeof(struct rxe_mc_elem),
		.elem_offset = offsetof(struct rxe_mc_elem, pelem),
	},
};

static inline const char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}

static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->index.max_index = max;
	pool->index.min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->index.table = kmalloc(size, GFP_KERNEL);
	if (!pool->index.table) {
		err = -ENOMEM;
		goto out;
	}

	pool->index.table_size = size;
	bitmap_zero(pool->index.table, max - min + 1);

out:
	return err;
}

int rxe_pool_init(
	struct rxe_dev *rxe,
	struct rxe_pool *pool,
	enum rxe_elem_type type,
	unsigned int max_elem)
{
	int err = 0;
	size_t size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe = rxe;
	pool->type = type;
	pool->max_elem = max_elem;
	pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
	pool->flags = rxe_type_info[type].flags;
	pool->index.tree = RB_ROOT;
	pool->key.tree = RB_ROOT;
	pool->cleanup = rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	rwlock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key.key_offset = rxe_type_info[type].key_offset;
		pool->key.key_size = rxe_type_info[type].key_size;
	}

out:
	return err;
}
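
/*
 * Example usage (illustrative sketch, not part of this file): the driver
 * sizes each pool from its device attributes at startup, roughly the way
 * rxe's init path does for QPs:
 *
 *	err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
 *			    rxe->attr.max_qp);
 *	if (err)
 *		goto err_out;	// hypothetical error label
 */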
void rxe_pool_cleanup(struct rxe_pool *pool)
{
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));

	kfree(pool->index.table);
}

/* caller must hold pool->pool_lock; the search starts from the most
 * recently allocated bit and wraps around, so allocation tends to cycle
 * through the index range rather than immediately reusing a freed index.
 */
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->index.max_index - pool->index.min_index + 1;

	index = find_next_zero_bit(pool->index.table, range, pool->index.last);
	if (index >= range)
		index = find_first_zero_bit(pool->index.table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->index.table);
	pool->index.last = index;
	return index + pool->index.min_index;
}

static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->index.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, index_node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			goto out;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->index_node, parent, link);
	rb_insert_color(&new->index_node, &pool->index.tree);
out:
	return;
}

static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->key.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     (u8 *)new + pool->key.key_offset,
			     pool->key.key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			goto out;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->key_node, parent, link);
	rb_insert_color(&new->key_node, &pool->key.tree);
out:
	return;
}

void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;

	memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
	insert_key(pool, elem);
}

void __rxe_add_key(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_add_key_locked(elem, key);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	rb_erase(&elem->key_node, &pool->key.tree);
}

void __rxe_drop_key(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_key_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}
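
/*
 * Example (illustrative sketch): callers normally reach the key helpers
 * through wrapper macros in rxe_pool.h that pass &obj->pelem, e.g. for a
 * multicast group keyed by its MGID:
 *
 *	rxe_add_key(grp, &mgid);	// copy key into grp, insert in tree
 *	...
 *	rxe_drop_key(grp);		// remove grp from the key tree
 */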
void __rxe_add_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	elem->index = alloc_index(pool);
	insert_index(pool, elem);
}

void __rxe_add_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_add_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	clear_bit(elem->index - pool->index.min_index, pool->index.table);
	rb_erase(&elem->index_node, &pool->index.tree);
}

void __rxe_drop_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void *rxe_alloc_locked(struct rxe_pool *pool)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	/* GFP_ATOMIC: this variant is called with pool_lock held */
	obj = kzalloc(info->size, GFP_ATOMIC);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}

void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(info->size, GFP_KERNEL);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}

int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return 0;

out_cnt:
	atomic_dec(&pool->num_elem);
	return -EINVAL;
}

void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	u8 *obj;

	if (pool->cleanup)
		pool->cleanup(elem);

	/* RXE_POOL_NO_ALLOC objects are embedded in structures allocated
	 * elsewhere, so only pool-allocated objects are freed here.
	 */
	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
		obj = (u8 *)elem - info->elem_offset;
		kfree(obj);
	}

	atomic_dec(&pool->num_elem);
}

void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj;

	node = pool->index.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, index_node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

	return obj;
}
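
/*
 * Example (illustrative sketch): a successful lookup (via the
 * rxe_pool_get_index() wrapper just below) returns the object with an
 * elevated kref, so the caller owns a reference and must release it via
 * rxe_drop_ref() from rxe_pool.h when done:
 *
 *	struct rxe_qp *qp = rxe_pool_get_index(&rxe->qp_pool, qpn);
 *	if (!qp)
 *		return;		// hypothetical not-found handling
 *	...use qp...
 *	rxe_drop_ref(qp);
 */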
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	u8 *obj;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_index_locked(pool, index);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}

void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj;
	int cmp;

	node = pool->key.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     key, pool->key.key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	u8 *obj;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_key_locked(pool, key);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}
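
/*
 * Example (illustrative sketch): key lookups work the same way and also
 * return a referenced object, e.g. resolving a multicast group from a
 * packet's destination GID, roughly as the receive path does:
 *
 *	grp = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
 *	if (!grp)
 *		goto drop;	// hypothetical drop label
 *	...
 *	rxe_drop_ref(grp);
 */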